repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
djs55/sm | drivers/HBASR.py | 8 | 10540 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# HBASR: Hardware HBA LUN driver, e.g. Fibre Channel or SAS or
# hardware based iSCSI
#
import SR, VDI, SRCommand, ISCSISR
import devscan, scsiutil, util, LUNperVDI
import os, sys, re, time
import xs_errors
import xml.dom.minidom
# Operations this SR driver advertises to the storage-manager framework.
CAPABILITIES = ["SR_PROBE","VDI_CREATE","VDI_DELETE","VDI_ATTACH",
                "VDI_DETACH", "VDI_INTRODUCE"]

# Device-config keys accepted by this driver (name, human-readable help).
CONFIGURATION = [ [ 'type', 'HBA type (optional) (e.g. FC, iSCSI, SAS etc..)' ] ]

# Driver metadata handed to SRCommand.run() when invoked directly.
DRIVER_INFO = {
    'name': 'HBA LUN-per-VDI driver',
    'description': 'SR plugin which represents LUNs as VDIs sourced by hardware HBA adapters, e.g. hardware-based iSCSI or FC support',
    'vendor': 'Citrix Systems Inc',
    'copyright': '(C) 2008 Citrix Systems Inc',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
    }
class HBASR(SR.SR):
    """HBA storage repository.

    Represents hardware HBA attached LUNs (Fibre Channel, SAS or
    hardware-based iSCSI) as one raw VDI per LUN.
    """

    def handles(type):
        # Dispatch hook: the SR framework selects this driver for "hba" SRs.
        if type == "hba":
            return True
        return False
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        """Initialise per-SR state from the device configuration."""
        self.sr_vditype = 'phy'
        # "any" means scan every adapter type unless dconf narrows it
        # down (e.g. type=FC).
        self.type = "any"
        if self.dconf.has_key('type') and self.dconf['type']:
            self.type = self.dconf['type']
        self.attached = False
        self.procname = ""
        self.devs = {}

    def _init_hbadict(self):
        """Scan HBA adapters once and cache the results on this object."""
        if not hasattr(self, "hbas"):
            # Renamed from 'dict' so the builtin is not shadowed.
            scan = devscan.adapters(filterstr=self.type)
            self.hbadict = scan['devs']
            self.hbas = scan['adt']
            if len(self.hbas):
                self.attached = True
                self.devs = scsiutil.cacheSCSIidentifiers()

    def _init_hba_hostname(self):
        """ get the HBA Host WWN information on this host function """
        fc_xml = self._probe_hba()
        nodewwnval = ''
        try:
            fcs = xml.dom.minidom.parseString(fc_xml)
            infos = fcs.getElementsByTagName("HBAInfo")
            for info in infos:
                nodewwn = info.getElementsByTagName("nodeWWN")
                nodewwnval = str(nodewwn[0].firstChild.nodeValue)
                # Only the first HBA's node WWN is reported.
                break
        # Was a bare 'except:', which would also swallow KeyboardInterrupt
        # and SystemExit; narrowed to Exception.
        except Exception:
            raise xs_errors.XenError('XMLParse', opterr='HBA Host WWN scanning failed')
        return nodewwnval

    def _init_hbas(self):
        """ get the HBA information on this host function """
        fc_xml = self._probe_hba()
        adt = {}
        try:
            fcs = xml.dom.minidom.parseString(fc_xml)
            infos = fcs.getElementsByTagName("HBAInfo")
            # HBAInfo --> Port --> portWWN
            # HBAInfo --> Port --> deviceName
            for info in infos:
                ports = info.getElementsByTagName("Port")
                for port in ports:
                    portwwns = port.getElementsByTagName("portWWN")
                    devnames = port.getElementsByTagName("deviceName")
                    portval = str(portwwns[0].firstChild.nodeValue)
                    devpath = str(devnames[0].firstChild.nodeValue).split('/')[-1]
                    adt[devpath] = portval.split()[0]
        except Exception:
            raise xs_errors.XenError('XMLParse', \
                                     opterr='HBA scanning failed')
        return adt

    def _probe_hba(self):
        """Build an XML document describing every FC host in sysfs.

        Returns the serialised <HBAInfoList> document; raises XMLParse
        on any failure while walking /sys/class/fc_host.
        """
        try:
            # use sysfs tree to gather FC data
            dom = xml.dom.minidom.Document()
            hbalist = dom.createElement("HBAInfoList")
            dom.appendChild(hbalist)
            hostlist = util.listdir("/sys/class/fc_host")
            for host in hostlist:
                hbainfo = dom.createElement("HBAInfo")
                hbalist.appendChild(hbainfo)
                cmd = ["cat", "/sys/class/fc_host/%s/symbolic_name" % host]
                # pread output carries a trailing newline; strip it.
                sname = util.pread(cmd)[:-1]
                entry = dom.createElement("model")
                hbainfo.appendChild(entry)
                textnode = dom.createTextNode(sname)
                entry.appendChild(textnode)
                cmd = ["cat", "/sys/class/fc_host/%s/node_name" % host]
                nname = util.pread(cmd)[:-1]
                # adjust nodename to look like expected WWN string
                nname = util.make_WWN(nname)
                entry = dom.createElement("nodeWWN")
                hbainfo.appendChild(entry)
                textnode = dom.createTextNode(nname)
                entry.appendChild(textnode)
                port = dom.createElement("Port")
                hbainfo.appendChild(port)
                cmd = ["cat", "/sys/class/fc_host/%s/port_name" % host]
                pname = util.pread(cmd)[:-1]
                pname = util.make_WWN(pname)
                entry = dom.createElement("portWWN")
                port.appendChild(entry)
                textnode = dom.createTextNode(pname)
                entry.appendChild(textnode)
                cmd = ["cat", "/sys/class/fc_host/%s/port_state" % host]
                state = util.pread(cmd)[:-1]
                entry = dom.createElement("state")
                port.appendChild(entry)
                textnode = dom.createTextNode(state)
                entry.appendChild(textnode)
                entry = dom.createElement("deviceName")
                port.appendChild(entry)
                textnode = dom.createTextNode("/sys/class/scsi_host/%s" % host)
                entry.appendChild(textnode)
            return dom.toxml()
        except Exception:
            raise xs_errors.XenError('XMLParse', \
                                     opterr='HBA probe failed')

    def attach(self, sr_uuid):
        # Only multipath handling is required; LUNs are already visible.
        self._mpathHandle()

    def detach(self, sr_uuid):
        # Nothing to tear down unless a VDI is still in use.
        if util._containsVDIinuse(self):
            return

    def create(self, sr_uuid, size):
        """Create the SR, refusing to proceed if an HBA SR of the same
        type already exists on this pool."""
        # Check whether an SR already exists
        SRs = self.session.xenapi.SR.get_all_records()
        for sr in SRs:
            record = SRs[sr]
            sm_config = record["sm_config"]
            if sm_config.has_key('datatype') and \
               sm_config['datatype'] == 'HBA' and \
               sm_config['hbatype'] == self.type:
                raise xs_errors.XenError('SRInUse')
        self._init_hbadict()
        if not self.attached:
            raise xs_errors.XenError('InvalidDev', \
                  opterr=('No such HBA Device detected [%s]') % self.type)
        if self._loadvdis() > 0:
            scanrecord = SR.ScanRecord(self)
            scanrecord.synchronise()
        try:
            self.detach(sr_uuid)
        except Exception:
            # Best-effort detach; failure here must not abort create.
            pass
        self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        self.sm_config['disktype'] = 'Raw'
        self.sm_config['datatype'] = 'HBA'
        self.sm_config['hbatype'] = self.type
        self.sm_config['multipathable'] = 'true'
        self.session.xenapi.SR.set_sm_config(self.sr_ref, self.sm_config)

    def delete(self, sr_uuid):
        self.detach(sr_uuid)
        return

    def probe(self):
        """Return an XML listing of existing HBA SRs known to xapi."""
        self._init_hbadict()
        self.attach("")
        SRs = self.session.xenapi.SR.get_all_records()
        Recs = {}
        for sr in SRs:
            record = SRs[sr]
            sm_config = record["sm_config"]
            if sm_config.has_key('datatype') and \
               sm_config['datatype'] == 'HBA':
                Recs[record["uuid"]] = sm_config
        return self.srlist_toxml(Recs)

    def scan(self, sr_uuid):
        """Refresh the VDI list and utilisation figures for this SR."""
        self._init_hbadict()
        if not self.passthrough:
            if not self.attached:
                raise xs_errors.XenError('SRUnavailable')
            # HBA adapter scan already forced a bus rescan
            # Sleep for 2 seconds to allow devices to settle
            time.sleep(2)
            self._loadvdis()
            self.physical_utilisation = self.physical_size
            for uuid, vdi in self.vdis.iteritems():
                if vdi.managed:
                    self.physical_utilisation += vdi.size
            self.virtual_allocation = self.physical_utilisation
        return super(HBASR, self).scan(sr_uuid)

    def print_devs(self):
        self.attach("")
        self._init_hbadict()
        return devscan.scan(self)

    # This function returns a dictionary of HBA attached LUNs
    def _loadvdis(self):
        """Populate self.vdis from the cached device scan.

        Returns the number of VDIs discovered by this call (0 when the
        VDI map was already populated).
        """
        if self.vdis:
            # Already loaded; report nothing new (was 'return None',
            # which compared equal under callers' '> 0' checks).
            return 0
        self._init_hbadict()
        count = 0
        for key in self.hbadict.iterkeys():
            vdi_path = os.path.join("/dev", key)
            if not self.devs.has_key(vdi_path):
                continue
            uuid = scsiutil.gen_uuid_from_string(scsiutil.getuniqueserial(vdi_path))
            obj = self.vdi(uuid)
            # Value is unused; presumably kept for mpath cache side
            # effects — TODO confirm before removing.
            path = self.mpathmodule.path(scsiutil.getSCSIid(vdi_path))
            ids = self.devs[vdi_path]
            obj._query(vdi_path, ids[4])
            self.vdis[uuid] = obj
            self.physical_size += obj.size
            count += 1
        return count

    def _getLUNbySMconfig(self, sm_config):
        # Lookup by sm_config is not supported for HBA SRs.
        raise xs_errors.XenError('VDIUnavailable')

    def vdi(self, uuid):
        # Each LUN is exposed as a raw LUN-per-VDI object.
        return LUNperVDI.RAWVDI(self, uuid)

    def srlist_toxml(self, SRs):
        """Serialise a {uuid: sm_config} map as an <SRlist> XML document."""
        dom = xml.dom.minidom.Document()
        element = dom.createElement("SRlist")
        dom.appendChild(element)
        for val in SRs:
            record = SRs[val]
            entry = dom.createElement('SR')
            element.appendChild(entry)
            subentry = dom.createElement("UUID")
            entry.appendChild(subentry)
            textnode = dom.createTextNode(val)
            subentry.appendChild(textnode)
        return dom.toprettyxml()
if __name__ == '__main__':
    # Invoked directly by the storage manager: dispatch the SR command.
    SRCommand.run(HBASR, DRIVER_INFO)
else:
    # Imported as a module: register the driver with the SR framework.
    SR.registerSR(HBASR)
| lgpl-2.1 |
clairetang6/bokeh | sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py | 25 | 1514 | from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool
output_file("toolbar.html")
source = ColumnDataSource(
data=dict(
x=[1, 2, 3, 4, 5],
y=[2, 5, 8, 2, 7],
desc=['A', 'b', 'C', 'd', 'E'],
imgs = [
'http://bokeh.pydata.org/static/snake.jpg',
'http://bokeh.pydata.org/static/snake2.png',
'http://bokeh.pydata.org/static/snake3D.png',
'http://bokeh.pydata.org/static/snake4_TheRevenge.png',
'http://bokeh.pydata.org/static/snakebite.jpg'
]
)
)
hover = HoverTool(
tooltips="""
<div>
<div>
<img
src="@imgs" height="42" alt="@imgs" width="42"
style="float: left; margin: 0px 15px 15px 0px;"
border="2"
></img>
</div>
<div>
<span style="font-size: 17px; font-weight: bold;">@desc</span>
<span style="font-size: 15px; color: #966;">[$index]</span>
</div>
<div>
<span style="font-size: 15px;">Location</span>
<span style="font-size: 10px; color: #696;">($x, $y)</span>
</div>
</div>
"""
)
p = figure(plot_width=400, plot_height=400, tools=[hover],
title="Mouse over the dots")
p.circle('x', 'y', size=20, source=source)
show(p)
| bsd-3-clause |
quantmind/pulsar | examples/httpbin/manage.py | 1 | 13827 | '''Pulsar HTTP test application::
python manage.py
Implementation
======================
.. autoclass:: HttpBin
:members:
:member-order: bysource
Server Hooks
===================
This example shows how to use
:ref:`server hooks <setting-section-application-hooks>` to log each request
.. automodule:: examples.httpbin.config
:members:
'''
import os
import sys
import string
from functools import partial
from itertools import repeat, chain
from random import random
from base64 import b64encode
from pulsar import version, JAPANESE, CHINESE, HINDI
from pulsar.api import HttpRedirect, HttpException
from pulsar.utils.httpurl import ENCODE_URL_METHODS, ENCODE_BODY_METHODS
from pulsar.utils.html import escape
from pulsar.apps import wsgi, ws
from pulsar.apps.wsgi import route, Html, HtmlDocument, GZipMiddleware, String
from pulsar.utils.system import json
from multidict import CIMultiDict, MultiDict
# All HTTP methods this app responds to, lower-cased: both the
# URL-encoded (GET-like) and body-encoded (POST-like) families.
METHODS = frozenset(chain((m.lower() for m in ENCODE_URL_METHODS),
                          (m.lower() for m in ENCODE_BODY_METHODS)))

# Interpreter version string, e.g. "3.6.4", shown on the home page.
pyversion = '.'.join(map(str, sys.version_info[:3]))
# Static assets shipped next to this module.
ASSET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets')
FAVICON = os.path.join(ASSET_DIR, 'favicon.ico')
characters = string.ascii_letters + string.digits
def asset(name, mode='r'):
    """Read and return the contents of *name* inside ``ASSET_DIR``.

    Returns ``None`` when the asset file does not exist.  ``mode`` is
    passed straight to :func:`open` (use ``'rb'`` for binary assets).
    """
    path = os.path.join(ASSET_DIR, name)
    if not os.path.isfile(path):
        return None
    with open(path, mode) as asset_file:
        return asset_file.read()
def as_dict(m):
    """Return *m* as a plain dict of key -> list-of-values.

    Non-MultiDict inputs are returned unchanged.
    """
    if not isinstance(m, MultiDict):
        return m
    return {key: m.getall(key) for key in m}
class BaseRouter(wsgi.Router):
    """Common helpers shared by the HttpBin routers.

    Provides JSON echo responses describing the incoming request
    (method, headers, query args, body parts and files).
    """
    ########################################################################
    #    INTERNALS
    def info_data_response(self, request, **params):
        # Serialise the request description as a JSON response.
        data = self.info_data(request, **params)
        return request.json_response(data)

    def info_data(self, request, **params):
        """Build a dict describing *request*; extra **params are merged in."""
        headers = self.getheaders(request)
        data = {'method': request.method,
                'headers': headers,
                'pulsar': self.pulsar_info(request)}
        if request.method in ENCODE_URL_METHODS:
            # GET-like methods carry data in the query string only.
            data['args'] = as_dict(request.url_data)
        else:
            # Body-encoded methods: split form args from uploaded files.
            args, files = request.data_and_files()
            jfiles = MultiDict()
            if files:
                for name, part in files.items():
                    try:
                        # Prefer text; fall back to base64 for binary parts.
                        part = part.string()
                    except UnicodeError:
                        part = part.base64()
                    jfiles.add(name, part)
            data.update((('args', as_dict(args)),
                         ('files', as_dict(jfiles))))
        data.update(params)
        return data

    def getheaders(self, request):
        # Recover HTTP request headers from the WSGI environ
        # (HTTP_FOO_BAR -> FOO-BAR).
        headers = CIMultiDict()
        for k in request.environ:
            if k.startswith('HTTP_'):
                headers[k[5:].replace('_', '-')] = request.environ[k]
        return dict(headers)

    def pulsar_info(self, request):
        # Connection-level info supplied by the pulsar server.
        return request.cache.connection.info()
class HttpBin(BaseRouter):
    '''The main :class:`.Router` for the HttpBin application
    '''
    def get(self, request):
        '''The home page of this router'''
        # Build an index of every registered sub-route as an HTML list.
        ul = Html('ul')
        for router in sorted(self.routes, key=lambda r: r.creation_count):
            a = router.link(escape(router.route.path))
            a.addClass(router.name)
            for method in METHODS:
                if router.getparam(method):
                    a.addClass(method)
            li = Html('li', a, ' %s' % router.getparam('title', ''))
            ul.append(li)
        title = 'Pulsar'
        html = request.html_document
        html.head.title = title
        html.head.links.append('httpbin.css')
        html.head.links.append('favicon.ico', rel="icon", type='image/x-icon')
        html.head.scripts.append('httpbin.js')
        ul = ul.to_string(request)
        # Fill the HTML template with version info and the route index.
        templ = asset('template.html')
        body = templ % (title, JAPANESE, CHINESE, version, pyversion, ul)
        html.body.append(body)
        return html.http_response(request)

    def head(self, request):
        # HEAD mirrors GET; the framework strips the body.
        return self.get(request)

    @route(title='Returns GET data')
    def get_get(self, request):
        return self.info_data_response(request)

    @route(title='Returns POST data')
    def post_post(self, request):
        return self.info_data_response(request)

    @route(title='Returns Post bytes data')
    def post_post_chunks(self, request):
        # Echo the raw request body back with the same content type.
        data, _ = request.data_and_files()
        content_type = request.get('CONTENT_TYPE')
        request.response.content_type = content_type
        request.response.content = data
        return request.response

    @route(title='Returns PATCH data')
    def patch_patch(self, request):
        return self.info_data_response(request)

    @route(title='Returns PUT data')
    def put_put(self, request):
        return self.info_data_response(request)

    @route(title='Returns DELETE data')
    def delete_delete(self, request):
        return self.info_data_response(request)

    @route('redirect/<int(min=1,max=10):times>', defaults={'times': 5},
           title='302 Redirect n times')
    def redirect(self, request):
        # Count down through /redirect/<n-1> until landing on /get.
        num = request.urlargs['times'] - 1
        if num:
            raise HttpRedirect('/redirect/%s' % num)
        else:
            raise HttpRedirect('/get')

    @route('getsize/<int(min=1,max=8388608):size>', defaults={'size': 150000},
           title='Returns a preset size of data (limit at 8MB)')
    def getsize(self, request):
        size = request.urlargs['size']
        data = {'size': size, 'data': 'd' * size}
        return self.info_data_response(request, **data)

    @route(title='Returns gzip encoded data')
    def gzip(self, request):
        # Force gzip by using a very low minimum-length threshold (10).
        response = self.info_data_response(request, gzipped=True)
        return GZipMiddleware(10)(request.environ, response)

    @route(title='Returns cookie data')
    def cookies(self, request):
        cookies = request.cookies
        d = dict(((c.key, c.value) for c in cookies.values()))
        return request.json_response({'cookies': d})

    @route('cookies/set/<name>/<value>', title='Sets a simple cookie',
           defaults={'name': 'package', 'value': 'pulsar'})
    def request_cookies_set(self, request):
        # Set the cookie, then redirect to /cookies so it can be read back.
        key = request.urlargs['name']
        value = request.urlargs['value']
        request.response.set_cookie(key, value=value)
        request.response.status_code = 302
        request.response.headers['location'] = '/cookies'
        return request.response

    @route('status/<int(min=100,max=505):status>',
           title='Returns given HTTP Status code',
           defaults={'status': 418})
    def status(self, request):
        request.response.content_type = 'text/html'
        msg = request.url_data.get('message', 'test error')
        raise HttpException(msg, status=request.urlargs['status'])

    @route(title='Returns response headers')
    def response_headers(self, request):
        # Capture the outgoing headers via the 'on_headers' event and
        # echo them back as the JSON body.
        class Gen:
            headers = None

            def __call__(self, server, data=None):
                # Parse the raw header bytes into a dict.
                headers = {}
                for hv in bytes(data).decode('utf-8').split('\r\n'):
                    hv = hv.split(':')
                    if len(hv) >= 2:
                        headers[hv[0].strip()] = (':'.join(hv[1:])).strip()
                self.headers = json.dumps(headers).encode('utf-8')

            def generate(self):
                # yield a byte so that headers are sent
                yield b''
                # we must have the headers now
                yield self.headers

        gen = Gen()
        request.cache.event('on_headers').bind(gen)
        request.response.content = gen.generate()
        request.response.content_type = 'application/json'
        return request.response

    @route('basic-auth/<username>/<password>',
           title='Challenges HTTPBasic Auth',
           defaults={'username': 'username', 'password': 'password'})
    def challenge_auth(self, request):
        auth = request.get('http.authorization')
        if auth and auth.authenticated(request.environ, **request.urlargs):
            return request.json_response({'authenticated': True,
                                          'username': auth.username})
        # No/invalid credentials: issue a 401 with a Basic challenge.
        raise wsgi.HttpAuthenticate('basic')

    @route('digest-auth/<username>/<password>/<qop>',
           title='Challenges HTTP Digest Auth',
           defaults={'username': 'username',
                     'password': 'password',
                     'qop': 'auth'})
    def challenge_digest_auth(self, request):
        auth = request.get('http.authorization')
        if auth and auth.authenticated(request.environ, **request.urlargs):
            return request.json_response({'authenticated': True,
                                          'username': auth.username})
        raise wsgi.HttpAuthenticate('digest', qop=[request.urlargs['qop']])

    @route('stream/<int(min=1):m>/<int(min=1):n>',
           title='Stream m chunk of data n times',
           defaults={'m': 300, 'n': 20})
    def request_stream(self, request):
        # Stream the response lazily: n chunks of m bytes each.
        m = request.urlargs['m']
        n = request.urlargs['n']
        request.response.content_type = 'text/plain'
        request.response.content = repeat(b'a' * m, n)
        return request.response

    @route(title='A web socket graph')
    def websocket(self, request):
        data = open(os.path.join(os.path.dirname(__file__),
                                 'assets', 'websocket.html')).read()
        scheme = 'wss' if request.is_secure else 'ws'
        host = request.get('HTTP_HOST')
        data = data % {'address': '%s://%s/graph-data' % (scheme, host)}
        request.response.content_type = 'text/html'
        request.response.content = data
        return request.response

    @route(title='Live server statistics')
    def stats(self, request):
        '''Live stats for the server.

        Try sending lots of requests
        '''
        # scheme = 'wss' if request.is_secure else 'ws'
        # host = request.get('HTTP_HOST')
        # address = '%s://%s/stats' % (scheme, host)
        doc = HtmlDocument(title='Live server stats', media_path='/assets/')
        # docs.head.scripts
        return doc.http_response(request)

    @route('clip/<int(min=256,max=16777216):chunk_size>',
           defaults={'chunk_size': 4096},
           title='Show a video clip')
    def clip(self, request):
        # Serve the clip in chunks of the requested size.
        c = request.urlargs['chunk_size']
        filepath = os.path.join(ASSET_DIR, 'clip.mp4')
        return wsgi.file_response(request, filepath, c)

    @route('servername',
           title='display the server name')
    def servername(self, request):
        name = request.get('SERVER_NAME')
        return String(name, '\n').http_response(request)

    @route(title="Pulsar is several languages")
    def get_pulsar(self, request):
        data = [
            'pulsar',
            JAPANESE,
            CHINESE,
            HINDI
        ]
        return request.json_response(data)

    ########################################################################
    #    BENCHMARK ROUTES
    @route()
    def json(self, request):
        return request.json_response({'message': "Hello, World!"})

    @route()
    def plaintext(self, request):
        return String('Hello, World!').http_response(request)
class Upload(BaseRouter):
    """Streaming multipart upload endpoint.

    Consumes the request body part-by-part via the ``stream`` callback
    rather than buffering the whole upload in memory.
    """
    response_content_types = ['multipart/form-data']

    async def put(self, request):
        headers = self.getheaders(request)
        # Accumulators filled incrementally by self.stream as parts arrive.
        data = {'method': request.method,
                'headers': headers,
                'pulsar': self.pulsar_info(request),
                'args': MultiDict(),
                'files': MultiDict()}
        request.cache.response_data = data
        await request.data_and_files(stream=partial(self.stream, request))
        data['args'] = as_dict(data['args'])
        data['files'] = as_dict(data['files'])
        return request.json_response(data)

    def stream(self, request, part):
        """Callback invoked for each chunk of each multipart *part*."""
        # A new part resets the per-part buffer.
        if request.cache.get('current_data') is not part:
            request.cache.current_data = part
            request.cache.current_data_buffer = []
        request.cache.current_data_buffer.append(part.recv())
        if part.complete():
            # Part fully received: record it under args or files.
            data_store = request.cache.response_data
            data = b''.join(request.cache.current_data_buffer)
            store = data_store['args']
            if part.is_file():
                store = data_store['files']
            try:
                # Prefer UTF-8 text; base64-encode binary payloads.
                data.decode('utf-8')
            except UnicodeError:
                data = b64encode(data)
            store.add(part.name, data.decode('utf-8'))
class ExpectFail(BaseRouter):
    """Router used to exercise error handling of the request body stream."""
    def post(self, request):
        # Deliberately fail the wsgi input stream.
        request.get('wsgi.input').fail()
class Graph(ws.WS):
    """WebSocket handler feeding random data points to the graph demo."""
    def on_message(self, websocket, msg):
        # Reply to any message with 100 random (index, value) pairs.
        websocket.write(json.dumps([(i, random()) for i in range(100)]))
class Site(wsgi.LazyWsgi):
    """Lazy WSGI application factory assembling the middleware stack."""
    def setup(self, environ):
        router = HttpBin('/')
        # Order matters: body/path/auth middleware run before routing;
        # the catch-all HttpBin router goes last.
        return wsgi.WsgiHandler([ExpectFail('expect'),
                                 Upload('upload'),
                                 wsgi.wait_for_body_middleware,
                                 wsgi.clean_path_middleware,
                                 wsgi.authorization_middleware,
                                 wsgi.MediaRouter('media', ASSET_DIR,
                                                  show_indexes=True),
                                 ws.WebSocket('/graph-data', Graph()),
                                 router])
def server(description=None, **kwargs):
    """Create the HttpBin :class:`WSGIServer`.

    ``description`` defaults to 'Pulsar HttpBin'; remaining keyword
    arguments are forwarded to the server constructor.
    """
    return wsgi.WSGIServer(Site(),
                           description=description or 'Pulsar HttpBin',
                           **kwargs)
if __name__ == '__main__':  # pragma    nocover
    # Run the HttpBin server when executed as a script.
    server().start()
| bsd-3-clause |
kobejean/tensorflow | tensorflow/python/layers/convolutional.py | 9 | 65368 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the convolutional layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('layers.Conv1D')
@tf_export('layers.Conv1D')
class Conv1D(keras_layers.Conv1D, base.Layer):
  """1D convolution layer (e.g. temporal convolution).

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of a single integer, specifying the
      length of the 1D convolution window.
    strides: An integer or tuple/list of a single integer,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: An integer or tuple/list of a single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """

  def __init__(self, filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # Thin tf.layers wrapper: forward every argument unchanged to the
    # Keras Conv1D implementation; base.Layer supplies variable-scope
    # compatible behaviour.
    super(Conv1D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name, **kwargs)
@tf_export('layers.conv1d')
def conv1d(inputs,
           filters,
           kernel_size,
           strides=1,
           padding='valid',
           data_format='channels_last',
           dilation_rate=1,
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           kernel_constraint=None,
           bias_constraint=None,
           trainable=True,
           name=None,
           reuse=None):
  """Functional interface for 1D convolution layer (e.g. temporal convolution).

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    inputs: Tensor input.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of a single integer, specifying the
      length of the 1D convolution window.
    strides: An integer or tuple/list of a single integer,
      specifying the stride length of the convolution.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: An integer or tuple/list of a single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any `strides` value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Construct a Conv1D layer bound to the given variable scope
  # (_reuse/_scope enable graph-mode variable sharing) and apply it.
  layer = Conv1D(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs)
@tf_export('layers.Conv2D')
class Conv2D(keras_layers.Conv2D, base.Layer):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
  def __init__(self, filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a `Conv2D` layer.

    Every argument is forwarded unchanged to the parent class constructor;
    see the class docstring for the meaning of each argument.
    """
    # Pure pass-through: this __init__ adds no behavior of its own.
    super(Conv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name, **kwargs)
@tf_export('layers.conv2d')
def conv2d(inputs,
           filters,
           kernel_size,
           strides=(1, 1),
           padding='valid',
           data_format='channels_last',
           dilation_rate=(1, 1),
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           kernel_constraint=None,
           bias_constraint=None,
           trainable=True,
           name=None,
           reuse=None):
  """Functional interface for the 2D convolution layer.

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    inputs: Tensor input.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 2 integers, specifying the
      height and width of the 2D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 2 integers,
      specifying the strides of the convolution along the height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Gather the layer configuration in one place; the leading-underscore
  # `_reuse`/`_scope` entries are internal-only constructor arguments that
  # back the functional API's variable-sharing behavior (note `_scope`
  # deliberately receives `name`).
  layer_config = dict(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return Conv2D(**layer_config).apply(inputs)
@tf_export('layers.Conv3D')
class Conv3D(keras_layers.Conv3D, base.Layer):
  """3D convolution layer (e.g. spatial convolution over volumes).
  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 3 integers,
      specifying the strides of the convolution along the depth,
      height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
    dilation_rate: An integer or tuple/list of 3 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self, filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1, 1),
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a `Conv3D` layer.

    Every argument is forwarded unchanged to the parent class constructor;
    see the class docstring above for the meaning of each argument.
    """
    # Pure pass-through: this __init__ adds no behavior of its own.
    super(Conv3D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name, **kwargs)
@tf_export('layers.conv3d')
def conv3d(inputs,
           filters,
           kernel_size,
           strides=(1, 1, 1),
           padding='valid',
           data_format='channels_last',
           dilation_rate=(1, 1, 1),
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           kernel_constraint=None,
           bias_constraint=None,
           trainable=True,
           name=None,
           reuse=None):
  """Functional interface for the 3D convolution layer.

  This layer creates a convolution kernel that is convolved
  (actually cross-correlated) with the layer input to produce a tensor of
  outputs. If `use_bias` is True (and a `bias_initializer` is provided),
  a bias vector is created and added to the outputs. Finally, if
  `activation` is not `None`, it is applied to the outputs as well.

  Arguments:
    inputs: Tensor input.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for
      all spatial dimensions.
    strides: An integer or tuple/list of 3 integers,
      specifying the strides of the convolution along the depth,
      height and width.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any stride value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
    dilation_rate: An integer or tuple/list of 3 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Gather the layer configuration in one place; the leading-underscore
  # `_reuse`/`_scope` entries are internal-only constructor arguments that
  # back the functional API's variable-sharing behavior (note `_scope`
  # deliberately receives `name`).
  layer_config = dict(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return Conv3D(**layer_config).apply(inputs)
@tf_export('layers.SeparableConv1D')
class SeparableConv1D(keras_layers.SeparableConv1D, base.Layer):
  """Depthwise separable 1D convolution.
  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A single integer specifying the spatial
      dimensions of the filters.
    strides: A single integer specifying the strides
      of the convolution.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: A single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self, filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format='channels_last',
               dilation_rate=1,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer=None,
               pointwise_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a `SeparableConv1D` layer.

    Every argument is forwarded unchanged to the parent class constructor;
    see the class docstring above for the meaning of each argument.
    """
    # Pure pass-through: this __init__ adds no behavior of its own.
    super(SeparableConv1D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        depth_multiplier=depth_multiplier,
        activation=activation,
        use_bias=use_bias,
        depthwise_initializer=depthwise_initializer,
        pointwise_initializer=pointwise_initializer,
        bias_initializer=bias_initializer,
        depthwise_regularizer=depthwise_regularizer,
        pointwise_regularizer=pointwise_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        depthwise_constraint=depthwise_constraint,
        pointwise_constraint=pointwise_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
@tf_export('layers.SeparableConv2D')
class SeparableConv2D(keras_layers.SeparableConv2D, base.Layer):
  """Depthwise separable 2D convolution.
  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self, filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               dilation_rate=(1, 1),
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer=None,
               pointwise_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a `SeparableConv2D` layer.

    Every argument is forwarded unchanged to the parent class constructor;
    see the class docstring above for the meaning of each argument.
    """
    # Pure pass-through: this __init__ adds no behavior of its own.
    super(SeparableConv2D, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        depth_multiplier=depth_multiplier,
        activation=activation,
        use_bias=use_bias,
        depthwise_initializer=depthwise_initializer,
        pointwise_initializer=pointwise_initializer,
        bias_initializer=bias_initializer,
        depthwise_regularizer=depthwise_regularizer,
        pointwise_regularizer=pointwise_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        depthwise_constraint=depthwise_constraint,
        pointwise_constraint=pointwise_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
@tf_export('layers.separable_conv1d')
def separable_conv1d(inputs,
                     filters,
                     kernel_size,
                     strides=1,
                     padding='valid',
                     data_format='channels_last',
                     dilation_rate=1,
                     depth_multiplier=1,
                     activation=None,
                     use_bias=True,
                     depthwise_initializer=None,
                     pointwise_initializer=None,
                     bias_initializer=init_ops.zeros_initializer(),
                     depthwise_regularizer=None,
                     pointwise_regularizer=None,
                     bias_regularizer=None,
                     activity_regularizer=None,
                     depthwise_constraint=None,
                     pointwise_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the depthwise separable 1D convolution layer.

  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A single integer specifying the spatial
      dimensions of the filters.
    strides: A single integer specifying the strides
      of the convolution.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, length, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, length)`.
    dilation_rate: A single integer, specifying
      the dilation rate to use for dilated convolution.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Gather the layer configuration in one place; the leading-underscore
  # `_reuse`/`_scope` entries are internal-only constructor arguments that
  # back the functional API's variable-sharing behavior (note `_scope`
  # deliberately receives `name`).
  layer_config = dict(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      depth_multiplier=depth_multiplier,
      activation=activation,
      use_bias=use_bias,
      depthwise_initializer=depthwise_initializer,
      pointwise_initializer=pointwise_initializer,
      bias_initializer=bias_initializer,
      depthwise_regularizer=depthwise_regularizer,
      pointwise_regularizer=pointwise_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      depthwise_constraint=depthwise_constraint,
      pointwise_constraint=pointwise_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return SeparableConv1D(**layer_config).apply(inputs)
@tf_export('layers.separable_conv2d')
def separable_conv2d(inputs,
                     filters,
                     kernel_size,
                     strides=(1, 1),
                     padding='valid',
                     data_format='channels_last',
                     dilation_rate=(1, 1),
                     depth_multiplier=1,
                     activation=None,
                     use_bias=True,
                     depthwise_initializer=None,
                     pointwise_initializer=None,
                     bias_initializer=init_ops.zeros_initializer(),
                     depthwise_regularizer=None,
                     pointwise_regularizer=None,
                     bias_regularizer=None,
                     activity_regularizer=None,
                     depthwise_constraint=None,
                     pointwise_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the depthwise separable 2D convolution layer.

  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function. Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel.
    pointwise_initializer: An initializer for the pointwise convolution kernel.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer will be used.
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Gather the layer configuration in one place; the leading-underscore
  # `_reuse`/`_scope` entries are internal-only constructor arguments that
  # back the functional API's variable-sharing behavior (note `_scope`
  # deliberately receives `name`).
  layer_config = dict(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      depth_multiplier=depth_multiplier,
      activation=activation,
      use_bias=use_bias,
      depthwise_initializer=depthwise_initializer,
      pointwise_initializer=pointwise_initializer,
      bias_initializer=bias_initializer,
      depthwise_regularizer=depthwise_regularizer,
      pointwise_regularizer=pointwise_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      depthwise_constraint=depthwise_constraint,
      pointwise_constraint=pointwise_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return SeparableConv2D(**layer_config).apply(inputs)
@tf_export('layers.Conv2DTranspose')
class Conv2DTranspose(keras_layers.Conv2DTranspose, base.Layer):
  """Transposed 2D convolution layer (sometimes called 2D Deconvolution).
  The need for transposed convolutions generally arises
  from the desire to use a transformation going in the opposite direction
  of a normal convolution, i.e., from something that has the shape of the
  output of some convolution to something that has the shape of its input
  while maintaining a connectivity pattern that is compatible with
  said convolution.
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of 2 positive integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self, filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format='channels_last',
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # All construction is delegated to the keras_layers implementation; this
    # subclass only mixes in base.Layer to expose it as a tf.layers layer.
    super(Conv2DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
@tf_export('layers.conv2d_transpose')
def conv2d_transpose(inputs,
                     filters,
                     kernel_size,
                     strides=(1, 1),
                     padding='valid',
                     data_format='channels_last',
                     activation=None,
                     use_bias=True,
                     kernel_initializer=None,
                     bias_initializer=init_ops.zeros_initializer(),
                     kernel_regularizer=None,
                     bias_regularizer=None,
                     activity_regularizer=None,
                     kernel_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the transposed 2D convolution layer.

  A transposed convolution goes in the opposite direction of a regular
  convolution: it maps a tensor shaped like a convolution's output back to a
  tensor shaped like that convolution's input, while keeping a compatible
  connectivity pattern. This function builds a `Conv2DTranspose` layer and
  immediately applies it to `inputs`.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 2 positive integers giving the spatial
      dimensions of the filters; a single integer uses the same value for
      both spatial dimensions.
    strides: A tuple or list of 2 positive integers giving the convolution
      strides; a single integer uses the same value for both dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, either `channels_last` (default), for inputs with
      shape `(batch, height, width, channels)`, or `channels_first`, for
      inputs with shape `(batch, channels, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the
      default initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. used to implement norm or value
      constraints for layer weights). It must take the unprojected variable
      and return the projected variable, which must have the same shape.
      Constraints are not safe to use with asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias after
      each `Optimizer` update.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Construct the layer object, then apply it; `_scope` ties the variable
  # scope to the layer name and `_reuse` forwards the reuse flag.
  deconv_layer = Conv2DTranspose(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return deconv_layer.apply(inputs)
@tf_export('layers.Conv3DTranspose')
class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer):
  """Transposed 3D convolution layer (sometimes called 3D Deconvolution).
  Arguments:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: An integer or tuple/list of 3 integers, specifying the
      depth, height and width of the 3D convolution window.
      Can be a single integer to specify the same value for all spatial
      dimensions.
    strides: An integer or tuple/list of 3 integers, specifying the strides
      of the convolution along the depth, height and width.
      Can be a single integer to specify the same value for all spatial
      dimensions.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)` while `channels_first`
      corresponds to inputs with shape
      `(batch, channels, depth, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the default
      initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
  """
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1, 1),
               padding='valid',
               data_format='channels_last',
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # All construction is delegated to the keras_layers implementation; this
    # subclass only mixes in base.Layer to expose it as a tf.layers layer.
    super(Conv3DTranspose, self).__init__(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
@tf_export('layers.conv3d_transpose')
def conv3d_transpose(inputs,
                     filters,
                     kernel_size,
                     strides=(1, 1, 1),
                     padding='valid',
                     data_format='channels_last',
                     activation=None,
                     use_bias=True,
                     kernel_initializer=None,
                     bias_initializer=init_ops.zeros_initializer(),
                     kernel_regularizer=None,
                     bias_regularizer=None,
                     activity_regularizer=None,
                     kernel_constraint=None,
                     bias_constraint=None,
                     trainable=True,
                     name=None,
                     reuse=None):
  """Functional interface for the transposed 3D convolution layer.

  Builds a `Conv3DTranspose` layer and immediately applies it to `inputs`.

  Arguments:
    inputs: Input tensor.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of 3 positive integers giving the spatial
      dimensions of the filters; a single integer uses the same value for
      all three spatial dimensions.
    strides: A tuple or list of 3 positive integers giving the convolution
      strides; a single integer uses the same value for all dimensions.
    padding: one of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, either `channels_last` (default), for inputs with
      shape `(batch, depth, height, width, channels)`, or `channels_first`,
      for inputs with shape `(batch, channels, depth, height, width)`.
    activation: Activation function. Set it to `None` to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: An initializer for the convolution kernel.
    bias_initializer: An initializer for the bias vector. If `None`, the
      default initializer will be used.
    kernel_regularizer: Optional regularizer for the convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. used to implement norm or value
      constraints for layer weights). It must take the unprojected variable
      and return the projected variable, which must have the same shape.
      Constraints are not safe to use with asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias after
      each `Optimizer` update.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: A string, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Construct the layer object, then apply it; `_scope` ties the variable
  # scope to the layer name and `_reuse` forwards the reuse flag.
  deconv_layer = Conv3DTranspose(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return deconv_layer.apply(inputs)
# Aliases
# Long-form (and historical "Deconv*"/"Deconvolution*") names for the classes
# and functions defined above, kept so callers using either spelling continue
# to work.
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Deconvolution2D = Deconv2D = Conv2DTranspose
Convolution3DTranspose = Deconvolution3D = Deconv3D = Conv3DTranspose
convolution1d = conv1d
convolution2d = conv2d
convolution3d = conv3d
separable_convolution2d = separable_conv2d
convolution2d_transpose = deconvolution2d = deconv2d = conv2d_transpose
convolution3d_transpose = deconvolution3d = deconv3d = conv3d_transpose
| apache-2.0 |
cryptickp/libcloud | libcloud/compute/drivers/opennebula.py | 49 | 43600 | # Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org driver.
"""
__docformat__ = 'epytext'
from base64 import b64encode
import hashlib
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.compute.base import NodeImage, NodeSize, StorageVolume
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider
__all__ = [
'ACTION',
'OpenNebulaResponse',
'OpenNebulaConnection',
'OpenNebulaNodeSize',
'OpenNebulaNetwork',
'OpenNebulaNodeDriver',
'OpenNebula_1_4_NodeDriver',
'OpenNebula_2_0_NodeDriver',
'OpenNebula_3_0_NodeDriver',
'OpenNebula_3_2_NodeDriver',
'OpenNebula_3_8_NodeDriver']
# Defaults consumed by OpenNebulaConnection and OpenNebulaNodeDriver below.
API_HOST = ''
# NOTE(review): presumably a (non-secure, secure) port pair interpreted by
# the base connection class -- confirm ordering before relying on it.
API_PORT = (4567, 443)
API_SECURE = True
API_PLAIN_AUTH = False
# Version used by OpenNebulaNodeDriver.__new__ to pick the concrete driver
# class when the caller does not specify an api_version.
DEFAULT_API_VERSION = '3.2'
class ACTION(object):
    """
    All actions, except RESUME, only apply when the VM is in the "Running"
    state.
    """
    STOP = 'STOPPED'
    """
    The VM is stopped, and its memory state stored to a checkpoint file. VM
    state, and disk image, are transferred back to the front-end. Resuming
    the VM requires the VM instance to be re-scheduled.
    """
    SUSPEND = 'SUSPENDED'
    """
    The VM is stopped, and its memory state stored to a checkpoint file. The VM
    state, and disk image, are left on the host to be resumed later. Resuming
    the VM does not require the VM to be re-scheduled. Rather, after
    suspending, the VM resources are reserved for later resuming.
    """
    RESUME = 'RESUME'
    """
    The VM is resumed using the saved memory state from the checkpoint file,
    and the VM's disk image. The VM is either started immediately, or
    re-scheduled depending on how it was suspended.
    """
    CANCEL = 'CANCEL'
    """
    The VM is forcibly shutdown, its memory state is deleted. If a persistent
    disk image was used, that disk image is transferred back to the front-end.
    Any non-persistent disk images are deleted.
    """
    SHUTDOWN = 'SHUTDOWN'
    """
    The VM is gracefully shutdown by sending the ACPI signal. If the VM does
    not shutdown, then it is considered to still be running. If successfully
    shut down, its memory state is deleted. If a persistent disk image was
    used, that disk image is transferred back to the front-end. Any
    non-persistent disk images are deleted.
    """
    REBOOT = 'REBOOT'
    """
    Introduced in OpenNebula v3.2.
    The VM is gracefully restarted by sending the ACPI signal.
    """
    DONE = 'DONE'
    """
    The VM is forcibly shutdown, its memory state is deleted. If a persistent
    disk image was used, that disk image is transferred back to the front-end.
    Any non-persistent disk images are deleted.
    """
class OpenNebulaResponse(XmlResponse):
    """
    Response class used by the OpenNebula.org driver (XML payloads).
    """

    def success(self):
        """
        Report whether the HTTP status code indicates success.

        :rtype: ``bool``
        :return: ``True`` for any 2xx status code, else ``False``.
        """
        status_code = int(self.status)
        return 200 <= status_code <= 299

    def parse_error(self):
        """
        Inspect an error response and surface it to the caller.

        @raise: :class:`InvalidCredsError` when the server answered 401.

        :rtype: :class:`ElementTree`
        :return: Contents of HTTP response body.
        """
        if int(self.status) == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.body)
        return self.body
class OpenNebulaConnection(ConnectionUserAndKey):
    """
    Connection class used by the OpenNebula.org driver.

    Supports both hashed (SHA1) and plain-text password authentication.
    """
    host = API_HOST
    port = API_PORT
    secure = API_SECURE
    plain_auth = API_PLAIN_AUTH
    responseCls = OpenNebulaResponse

    def __init__(self, *args, **kwargs):
        # 'plain_auth' is specific to this connection class; strip it before
        # forwarding the remaining arguments to the base class.
        if 'plain_auth' in kwargs:
            self.plain_auth = kwargs.pop('plain_auth')
        super(OpenNebulaConnection, self).__init__(*args, **kwargs)

    def add_default_headers(self, headers):
        """
        Attach the headers required by the OpenNebula.org OCCI interface.

        Adds a Basic HTTP Authorization header built from the user id and
        either the plain password or its SHA1 hex digest, depending on
        ``plain_auth``.

        :type headers: ``dict``
        :param headers: Dictionary containing HTTP headers.

        :rtype: ``dict``
        :return: The same dictionary, with the Authorization header set.
        """
        passwd = self.key if self.plain_auth else \
            hashlib.sha1(b(self.key)).hexdigest()
        credentials = b64encode(b('%s:%s' % (self.user_id, passwd)))
        headers['Authorization'] = 'Basic %s' % credentials.decode('utf-8')
        return headers
class OpenNebulaNodeSize(NodeSize):
    """
    NodeSize variant carrying the extra CPU fields OpenNebula reports.
    """

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 cpu=None, vcpu=None):
        super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram,
                                                 disk=disk,
                                                 bandwidth=bandwidth,
                                                 price=price, driver=driver)
        # OpenNebula-specific extensions on top of the generic NodeSize.
        self.cpu = cpu
        self.vcpu = vcpu

    def __repr__(self):
        template = ('<OpenNebulaNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
                    'bandwidth=%s, price=%s, driver=%s, cpu=%s, vcpu=%s ...>')
        return template % (self.id, self.name, self.ram, self.disk,
                           self.bandwidth, self.price, self.driver.name,
                           self.cpu, self.vcpu)
class OpenNebulaNetwork(object):
    """
    Common interface for handling networks of all types.

    A Network object is analogous to a physical switch connecting two or
    more physical nodes together; it gives libcloud a uniform way to
    manipulate networks across cloud providers. Network objects do little
    by themselves -- the network driver owns the actual connection -- and
    they are normally obtained from a driver (e.g. via
    ``driver.ex_list_networks()``) rather than constructed directly.
    """

    def __init__(self, id, name, address, size, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.address = address
        self.size = size
        self.driver = driver
        # get_uuid() reads self.id and self.driver, so it must run after
        # those attributes have been assigned.
        self.uuid = self.get_uuid()
        self.extra = extra or {}

    def get_uuid(self):
        """
        Return a unique hash for this network.

        The hash is the SHA1 digest of the network's ID combined with its
        driver type, which makes it unique across networks of all drivers.
        Note that, unlike a properly done system UUID, the same value may
        refer to a different system install at a different time.

        :rtype: ``str``
        :return: Unique identifier for this instance.
        """
        fingerprint = "%s:%s" % (self.id, self.driver.type)
        return hashlib.sha1(b(fingerprint)).hexdigest()

    def __repr__(self):
        template = ('<OpenNebulaNetwork: uuid=%s, name=%s, address=%s, '
                    'size=%s, provider=%s ...>')
        return template % (self.uuid, self.name, self.address, self.size,
                           self.driver.name)
class OpenNebulaNodeDriver(NodeDriver):
    """
    OpenNebula.org node driver.
    """

    connectionCls = OpenNebulaConnection
    name = 'OpenNebula'
    website = 'http://opennebula.org/'
    type = Provider.OPENNEBULA

    # Mapping of OpenNebula compute states onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'INIT': NodeState.PENDING,
        'PENDING': NodeState.PENDING,
        'HOLD': NodeState.PENDING,
        'ACTIVE': NodeState.RUNNING,
        'STOPPED': NodeState.TERMINATED,
        'SUSPENDED': NodeState.PENDING,
        'DONE': NodeState.TERMINATED,
        'FAILED': NodeState.TERMINATED}

    def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION,
                **kwargs):
        # Dispatch to the concrete driver class that matches the requested
        # OpenNebula API version.
        if cls is OpenNebulaNodeDriver:
            if api_version in ['1.4']:
                cls = OpenNebula_1_4_NodeDriver
            elif api_version in ['2.0', '2.2']:
                cls = OpenNebula_2_0_NodeDriver
            elif api_version in ['3.0']:
                cls = OpenNebula_3_0_NodeDriver
            elif api_version in ['3.2']:
                cls = OpenNebula_3_2_NodeDriver
            elif api_version in ['3.6']:
                cls = OpenNebula_3_6_NodeDriver
            elif api_version in ['3.8']:
                cls = OpenNebula_3_8_NodeDriver
                # v3.8 supports plain-text authentication; keep the class
                # default and the caller-supplied kwarg in sync.
                if 'plain_auth' not in kwargs:
                    kwargs['plain_auth'] = cls.plain_auth
                else:
                    cls.plain_auth = kwargs['plain_auth']
            else:
                raise NotImplementedError(
                    "No OpenNebulaNodeDriver found for API version %s" %
                    (api_version))
        return super(OpenNebulaNodeDriver, cls).__new__(cls)

    def create_node(self, **kwargs):
        """
        Create a new OpenNebula node.

        @inherits: :class:`NodeDriver.create_node`

        :keyword networks: List of virtual networks to which this node should
                           connect. (optional)
        :type networks: :class:`OpenNebulaNetwork` or
            ``list`` of :class:`OpenNebulaNetwork`
        """
        compute = ET.Element('COMPUTE')
        name = ET.SubElement(compute, 'NAME')
        name.text = kwargs['name']
        instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
        instance_type.text = kwargs['size'].name
        storage = ET.SubElement(compute, 'STORAGE')
        ET.SubElement(storage,
                      'DISK',
                      {'image': '%s' % (str(kwargs['image'].id))})
        if 'networks' in kwargs:
            if not isinstance(kwargs['networks'], list):
                kwargs['networks'] = [kwargs['networks']]
            networkGroup = ET.SubElement(compute, 'NETWORK')
            for network in kwargs['networks']:
                # Only request a fixed IP when the caller specified one.
                if network.address:
                    ET.SubElement(networkGroup, 'NIC',
                                  {'network': '%s' % (str(network.id)),
                                   'ip': network.address})
                else:
                    ET.SubElement(networkGroup, 'NIC',
                                  {'network': '%s' % (str(network.id))})
        xml = ET.tostring(compute)
        node = self.connection.request('/compute', method='POST',
                                       data=xml).object
        return self._to_node(node)

    def destroy_node(self, node):
        url = '/compute/%s' % (str(node.id))
        resp = self.connection.request(url, method='DELETE')
        return resp.status == httplib.OK

    def list_nodes(self):
        return self._to_nodes(self.connection.request('/compute').object)

    def list_images(self, location=None):
        return self._to_images(self.connection.request('/storage').object)

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        return [
            NodeSize(id=1,
                     name='small',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=2,
                     name='medium',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=3,
                     name='large',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
        ]

    def list_locations(self):
        return [NodeLocation(0, '', '', self)]

    def ex_list_networks(self, location=None):
        """
        List virtual networks on a provider.

        :param location: Location from which to request a list of virtual
                         networks. (optional)
        :type location: :class:`NodeLocation`

        :return: List of virtual networks available to be connected to a
                 compute node.
        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        """
        return self._to_networks(self.connection.request('/network').object)

    def ex_node_action(self, node, action):
        """
        Build action representation and instruct node to commit action.

        Build action representation from the compute node ID, and the
        action which should be carried out on that compute node. Then
        instruct the node to carry out that action.

        :param node: Compute node instance.
        :type node: :class:`Node`

        :param action: Action to be carried out on the compute node.
        :type action: ``str``

        :return: False if an HTTP Bad Request is received, else, True is
                 returned.
        :rtype: ``bool``
        """
        compute_node_id = str(node.id)
        compute = ET.Element('COMPUTE')
        compute_id = ET.SubElement(compute, 'ID')
        compute_id.text = compute_node_id
        state = ET.SubElement(compute, 'STATE')
        state.text = action
        xml = ET.tostring(compute)
        url = '/compute/%s' % compute_node_id
        resp = self.connection.request(url, method='PUT',
                                       data=xml)
        if resp.status == httplib.BAD_REQUEST:
            return False
        else:
            return True

    def _to_images(self, object):
        """
        Request a list of images and convert that list to a list of NodeImage
        objects.

        Request a list of images from the OpenNebula web interface, and
        issue a request to convert each XML object representation of an image
        to a NodeImage object.

        :rtype: ``list`` of :class:`NodeImage`
        :return: List of images.
        """
        images = []
        for element in object.findall('DISK'):
            image_id = element.attrib['href'].partition('/storage/')[2]
            image = self.connection.request(
                ('/storage/%s' % (image_id))).object
            images.append(self._to_image(image))
        return images

    def _to_image(self, image):
        """
        Take XML object containing an image description and convert to
        NodeImage object.

        :type image: :class:`ElementTree`
        :param image: XML representation of an image.

        :rtype: :class:`NodeImage`
        :return: The newly extracted :class:`NodeImage`.
        """
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('NAME'),
                         driver=self.connection.driver,
                         extra={'size': image.findtext('SIZE'),
                                'url': image.findtext('URL')})

    def _to_networks(self, object):
        """
        Request a list of networks and convert that list to a list of
        OpenNebulaNetwork objects.

        Request a list of networks from the OpenNebula web interface, and
        issue a request to convert each XML object representation of a network
        to an OpenNebulaNetwork object.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        :return: List of virtual networks.
        """
        networks = []
        for element in object.findall('NETWORK'):
            network_id = element.attrib['href'].partition('/network/')[2]
            network_element = self.connection.request(
                ('/network/%s' % (network_id))).object
            networks.append(self._to_network(network_element))
        return networks

    def _to_network(self, element):
        """
        Take XML object containing a network description and convert to
        OpenNebulaNetwork object.

        Take XML representation containing a network description and
        convert to OpenNebulaNetwork object.

        :rtype: :class:`OpenNebulaNetwork`
        :return: The newly extracted :class:`OpenNebulaNetwork`.
        """
        return OpenNebulaNetwork(id=element.findtext('ID'),
                                 name=element.findtext('NAME'),
                                 address=element.findtext('ADDRESS'),
                                 size=element.findtext('SIZE'),
                                 driver=self.connection.driver)

    def _to_nodes(self, object):
        """
        Request a list of compute nodes and convert that list to a list of
        Node objects.

        Request a list of compute nodes from the OpenNebula web interface, and
        issue a request to convert each XML object representation of a node
        to a Node object.

        :rtype: ``list`` of :class:`Node`
        :return: A list of compute nodes.
        """
        computes = []
        for element in object.findall('COMPUTE'):
            compute_id = element.attrib['href'].partition('/compute/')[2]
            compute = self.connection.request(
                ('/compute/%s' % (compute_id))).object
            computes.append(self._to_node(compute))
        return computes

    def _to_node(self, compute):
        """
        Take XML object containing a compute node description and convert to
        Node object.

        Take XML representation containing a compute node description and
        convert to Node object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`Node`
        :return: The newly extracted :class:`Node`.
        """
        try:
            state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
        except KeyError:
            state = NodeState.UNKNOWN
        return Node(id=compute.findtext('ID'),
                    name=compute.findtext('NAME'),
                    state=state,
                    public_ips=self._extract_networks(compute),
                    private_ips=[],
                    driver=self.connection.driver,
                    image=self._extract_images(compute))

    def _extract_networks(self, compute):
        """
        Extract networks from a compute node XML representation.

        Extract network descriptions from a compute node XML representation,
        converting each network to an OpenNebulaNetwork object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`s.
        :return: List of virtual networks attached to the compute node.
        """
        networks = list()
        network_list = compute.find('NETWORK')
        # Guard against a compute node with no <NETWORK> section; find()
        # returns None in that case. This mirrors the None-check already
        # performed for <STORAGE> in _extract_images.
        if network_list is not None:
            for element in network_list.findall('NIC'):
                networks.append(
                    OpenNebulaNetwork(id=element.attrib.get('network', None),
                                      name=None,
                                      address=element.attrib.get('ip', None),
                                      size=1,
                                      driver=self.connection.driver))
        return networks

    def _extract_images(self, compute):
        """
        Extract image disks from a compute node XML representation.

        Extract image disk descriptions from a compute node XML representation,
        converting the disks to an NodeImage object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`NodeImage`.
        :return: First disk attached to a compute node.
        """
        disks = list()
        disk_list = compute.find('STORAGE')
        if disk_list is not None:
            for element in disk_list.findall('DISK'):
                disks.append(
                    NodeImage(id=element.attrib.get('image', None),
                              name=None,
                              driver=self.connection.driver,
                              extra={'dev': element.attrib.get('dev', None)}))
        # @TODO: Return all disks when the Node type accepts multiple
        # attached disks per node.
        if len(disks) > 0:
            return disks[0]
        else:
            return None
class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v1.4.

    Inherits all behaviour unchanged from OpenNebulaNodeDriver; only the
    displayed driver name differs.
    """
    name = 'OpenNebula (v1.4)'
class OpenNebula_2_0_NodeDriver(OpenNebulaNodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v2.0 through OpenNebula.org
    v2.2.
    """

    name = 'OpenNebula (v2.0 - v2.2)'

    def create_node(self, **kwargs):
        """
        Create a new OpenNebula node.

        Builds a COMPUTE XML document (name, instance type, image disk,
        optional NICs and CONTEXT section) and POSTs it to ``/compute``.

        @inherits: :class:`NodeDriver.create_node`

        :keyword networks: List of virtual networks to which this node should
                           connect. (optional)
        :type    networks: :class:`OpenNebulaNetwork` or ``list``
                           of :class:`OpenNebulaNetwork`

        :keyword context: Custom (key, value) pairs to be injected into
                          compute node XML description. (optional)
        :type    context: ``dict``

        :return: Instance of a newly created node.
        :rtype:  :class:`Node`
        """
        compute = ET.Element('COMPUTE')

        name = ET.SubElement(compute, 'NAME')
        name.text = kwargs['name']

        instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
        instance_type.text = kwargs['size'].name

        disk = ET.SubElement(compute, 'DISK')
        ET.SubElement(disk,
                      'STORAGE',
                      {'href': '/storage/%s' % (str(kwargs['image'].id))})

        if 'networks' in kwargs:
            # Accept either a single network object or a list of them.
            if not isinstance(kwargs['networks'], list):
                kwargs['networks'] = [kwargs['networks']]

            for network in kwargs['networks']:
                nic = ET.SubElement(compute, 'NIC')
                ET.SubElement(nic, 'NETWORK',
                              {'href': '/network/%s' % (str(network.id))})
                if network.address:
                    ip_line = ET.SubElement(nic, 'IP')
                    ip_line.text = network.address

        if 'context' in kwargs:
            if isinstance(kwargs['context'], dict):
                contextGroup = ET.SubElement(compute, 'CONTEXT')
                # Context keys are uppercased to match the XML element
                # naming used by _extract_context (which lowercases them
                # again on the way back).
                for key, value in list(kwargs['context'].items()):
                    context = ET.SubElement(contextGroup, key.upper())
                    context.text = value

        xml = ET.tostring(compute)
        node = self.connection.request('/compute', method='POST',
                                       data=xml).object

        return self._to_node(node)

    def destroy_node(self, node):
        """
        Destroy (delete) a compute node.

        :param node: Node to be destroyed.
        :type  node: :class:`Node`

        :return: True if the DELETE request returned HTTP 204 No Content.
        :rtype:  ``bool``
        """
        url = '/compute/%s' % (str(node.id))
        resp = self.connection.request(url, method='DELETE')

        return resp.status == httplib.NO_CONTENT

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype:  ``list`` of :class:`OpenNebulaNodeSize`
        """
        # v2.x endpoints expose no instance-type listing, so the standard
        # OpenNebula sizes are hard-coded here (v3.2+ query the server).
        return [
            OpenNebulaNodeSize(id=1,
                               name='small',
                               ram=1024,
                               cpu=1,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=2,
                               name='medium',
                               ram=4096,
                               cpu=4,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=3,
                               name='large',
                               ram=8192,
                               cpu=8,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=4,
                               name='custom',
                               ram=0,
                               cpu=0,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
        ]

    def _to_images(self, object):
        """
        Request a list of images and convert that list to a list of NodeImage
        objects.

        Request a list of images from the OpenNebula web interface, and
        issue a request to convert each XML object representation of an image
        to a NodeImage object.

        :rtype:  ``list`` of :class:`NodeImage`
        :return: List of images.
        """
        images = []
        for element in object.findall('STORAGE'):
            # The listing only contains hrefs; fetch each image's full
            # description individually.
            image_id = element.attrib["href"].partition("/storage/")[2]
            image = self.connection.request(
                ("/storage/%s" % (image_id))).object
            images.append(self._to_image(image))

        return images

    def _to_image(self, image):
        """
        Take XML object containing an image description and convert to
        NodeImage object.

        :type  image: :class:`ElementTree`
        :param image: XML representation of an image.

        :rtype:  :class:`NodeImage`
        :return: The newly extracted :class:`NodeImage`.
        """
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('NAME'),
                         driver=self.connection.driver,
                         extra={'description': image.findtext('DESCRIPTION'),
                                'type': image.findtext('TYPE'),
                                'size': image.findtext('SIZE'),
                                'fstype': image.findtext('FSTYPE', None)})

    def _to_node(self, compute):
        """
        Take XML object containing a compute node description and convert to
        Node object.

        Take XML representation containing a compute node description and
        convert to Node object.

        :type  compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  :class:`Node`
        :return: The newly extracted :class:`Node`.
        """
        try:
            state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
        except KeyError:
            # Unrecognized state strings degrade gracefully to UNKNOWN.
            state = NodeState.UNKNOWN

        return Node(id=compute.findtext('ID'),
                    name=compute.findtext('NAME'),
                    state=state,
                    public_ips=self._extract_networks(compute),
                    private_ips=[],
                    driver=self.connection.driver,
                    image=self._extract_images(compute),
                    size=self._extract_size(compute),
                    extra={'context': self._extract_context(compute)})

    def _extract_networks(self, compute):
        """
        Extract networks from a compute node XML representation.

        Extract network descriptions from a compute node XML representation,
        converting each network to an OpenNebulaNetwork object.

        :type  compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  ``list`` of :class:`OpenNebulaNetwork`
        :return: List of virtual networks attached to the compute node.
        """
        networks = []

        # In the v2.x representation NICs hang directly off the COMPUTE
        # element (v1.4 nested them under a NETWORK element).
        for element in compute.findall('NIC'):
            network = element.find('NETWORK')
            network_id = network.attrib['href'].partition('/network/')[2]

            networks.append(
                OpenNebulaNetwork(id=network_id,
                                  name=network.attrib.get('name', None),
                                  address=element.findtext('IP'),
                                  size=1,
                                  driver=self.connection.driver,
                                  extra={'mac': element.findtext('MAC')}))

        return networks

    def _extract_images(self, compute):
        """
        Extract image disks from a compute node XML representation.

        Extract image disk descriptions from a compute node XML
        representation, converting the disks to an NodeImage object.

        :type  compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  ``list`` of :class:`NodeImage`
        :return: Disks attached to a compute node: a list for multiple
                 disks, a single :class:`NodeImage` for one disk, or
                 ``None`` for none.
        """
        disks = list()

        for element in compute.findall('DISK'):
            disk = element.find('STORAGE')
            image_id = disk.attrib['href'].partition('/storage/')[2]

            if 'id' in element.attrib:
                disk_id = element.attrib['id']
            else:
                disk_id = None

            disks.append(
                NodeImage(id=image_id,
                          name=disk.attrib.get('name', None),
                          driver=self.connection.driver,
                          extra={'type': element.findtext('TYPE'),
                                 'disk_id': disk_id,
                                 'target': element.findtext('TARGET')}))

        # Return all disks when the Node type accepts multiple attached disks
        # per node.
        if len(disks) > 1:
            return disks
        elif len(disks) == 1:
            return disks[0]
        else:
            return None

    def _extract_size(self, compute):
        """
        Extract size, or node type, from a compute node XML representation.

        Extract node size, or node type, description from a compute node XML
        representation, converting the node size to a NodeSize object.

        :type  compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  :class:`OpenNebulaNodeSize`
        :return: Node type of compute node, or ``None`` if no listed size
                 matches.
        """
        # NOTE(review): if the INSTANCE_TYPE element is missing,
        # instance_type is None and the generator below raises
        # AttributeError rather than returning None — confirm whether the
        # API guarantees the element's presence.
        instance_type = compute.find('INSTANCE_TYPE')
        try:
            return next((node_size for node_size in self.list_sizes()
                         if node_size.name == instance_type.text))
        except StopIteration:
            return None

    def _extract_context(self, compute):
        """
        Extract context variables from a compute node XML representation.

        Collect the (key, value) pairs from the compute node's CONTEXT
        section, lowercasing each tag name to use as the key.

        :type  compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype:  ``dict``
        :return: Dictionary containing (key, value) pairs related to
                 compute node context.
        """
        contexts = dict()
        context = compute.find('CONTEXT')

        if context is not None:
            for context_element in list(context):
                contexts[context_element.tag.lower()] = context_element.text

        return contexts
class OpenNebula_3_0_NodeDriver(OpenNebula_2_0_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.0.
    """

    name = 'OpenNebula (v3.0)'

    def ex_node_set_save_name(self, node, name):
        """
        Build action representation and instruct node to commit action.

        Build action representation from the compute node ID, the disk image
        which will be saved, and the name under which the image will be saved
        upon shutting down the compute node.

        :param node: Compute node instance.
        :type  node: :class:`Node`

        :param name: Name under which the image should be saved after shutting
                     down the compute node.
        :type  name: ``str``

        :return: False if an HTTP Bad Request is received, else, True is
                 returned.
        :rtype:  ``bool``
        """
        node_id = str(node.id)
        image_id = str(node.image.id)

        # Assemble the COMPUTE document describing the save-as request.
        compute = ET.Element('COMPUTE')

        id_element = ET.SubElement(compute, 'ID')
        id_element.text = node_id

        disk = ET.SubElement(compute, 'DISK', {'id': image_id})
        ET.SubElement(disk, 'STORAGE',
                      {'href': '/storage/%s' % image_id,
                       'name': node.image.name})
        ET.SubElement(disk, 'SAVE_AS', {'name': str(name)})

        response = self.connection.request('/compute/%s' % node_id,
                                           method='PUT',
                                           data=ET.tostring(compute))

        return response.status != httplib.BAD_REQUEST

    def _to_network(self, element):
        """
        Take XML object containing a network description and convert to
        OpenNebulaNetwork object.

        Take XML representation containing a network description and
        convert to OpenNebulaNetwork object.

        :return: The newly extracted :class:`OpenNebulaNetwork`.
        :rtype:  :class:`OpenNebulaNetwork`
        """
        extra = {'public': element.findtext('PUBLIC')}

        return OpenNebulaNetwork(id=element.findtext('ID'),
                                 name=element.findtext('NAME'),
                                 address=element.findtext('ADDRESS'),
                                 size=element.findtext('SIZE'),
                                 driver=self.connection.driver,
                                 extra=extra)
class OpenNebula_3_2_NodeDriver(OpenNebula_3_0_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.2.
    """

    name = 'OpenNebula (v3.2)'

    def reboot_node(self, node):
        """Reboot the given node via the REBOOT action."""
        return self.ex_node_action(node, ACTION.REBOOT)

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype:  ``list`` of :class:`OpenNebulaNodeSize`
        """
        # Unlike v2.x, the v3.2 API exposes instance types directly.
        return self._to_sizes(self.connection.request('/instance_type').object)

    def _to_sizes(self, object):
        """
        Request a list of instance types and convert that list to a list of
        OpenNebulaNodeSize objects.

        Request a list of instance types from the OpenNebula web interface,
        and issue a request to convert each XML object representation of an
        instance type to an OpenNebulaNodeSize object.

        :return: List of instance types.
        :rtype:  ``list`` of :class:`OpenNebulaNodeSize`
        """
        # (attribute name, type to cast to, XML tag alias or None).
        attributes = [('name', str, None), ('ram', int, 'MEMORY'),
                      ('cpu', float, None), ('vcpu', float, None),
                      ('disk', str, None), ('bandwidth', float, None),
                      ('price', float, None)]

        sizes = []
        for size_id, element in enumerate(object.findall('INSTANCE_TYPE'), 1):
            size_kwargs = {'id': size_id, 'driver': self}
            size_kwargs.update(
                self._get_attributes_values(attributes=attributes,
                                            element=element))
            sizes.append(OpenNebulaNodeSize(**size_kwargs))

        return sizes

    def _get_attributes_values(self, attributes, element):
        """Read and type-cast the listed attributes from an XML element.

        Missing tags yield ``None``; present values are cast with the
        supplied type. The XML tag is the alias when given, otherwise the
        uppercased attribute name.
        """
        values = {}

        for attribute_name, attribute_type, alias in attributes:
            tag = alias if alias else attribute_name.upper()
            raw = element.findtext(tag)
            values[attribute_name] = (attribute_type(raw)
                                      if raw is not None else None)

        return values
class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.6.
    """

    name = 'OpenNebula (v3.6)'

    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new persistent DATABLOCK storage volume.

        :param size: Volume size; passed through to the API as-is (units
                     are not validated here).
        :param name: Name of the new volume.
        :type  name: ``str``
        :param location: Ignored by this driver.
        :param snapshot: Ignored by this driver.

        :return: The newly created volume.
        :rtype:  :class:`StorageVolume`
        """
        storage = ET.Element('STORAGE')

        vol_name = ET.SubElement(storage, 'NAME')
        vol_name.text = name

        vol_type = ET.SubElement(storage, 'TYPE')
        vol_type.text = 'DATABLOCK'

        description = ET.SubElement(storage, 'DESCRIPTION')
        description.text = 'Attached storage'

        public = ET.SubElement(storage, 'PUBLIC')
        public.text = 'NO'

        persistent = ET.SubElement(storage, 'PERSISTENT')
        persistent.text = 'YES'

        fstype = ET.SubElement(storage, 'FSTYPE')
        fstype.text = 'ext3'

        vol_size = ET.SubElement(storage, 'SIZE')
        vol_size.text = str(size)

        xml = ET.tostring(storage)
        volume = self.connection.request('/storage',
                                         {'occixml': xml},
                                         method='POST').object

        return self._to_volume(volume)

    def destroy_volume(self, volume):
        """
        Delete a storage volume.

        :param volume: Volume to be destroyed.
        :type  volume: :class:`StorageVolume`

        :return: True if the DELETE request returned HTTP 204 No Content.
        :rtype:  ``bool``
        """
        url = '/storage/%s' % (str(volume.id))
        resp = self.connection.request(url, method='DELETE')

        return resp.status == httplib.NO_CONTENT

    def attach_volume(self, node, volume, device):
        """
        Attach a volume to a node via an ATTACHDISK action.

        :param node: Node to attach the volume to.
        :type  node: :class:`Node`
        :param volume: Volume to attach.
        :type  volume: :class:`StorageVolume`
        :param device: Target device name inside the node (e.g. a guest
                       disk device string).
        :type  device: ``str``

        :return: True if the action was accepted (HTTP 202).
        :rtype:  ``bool``
        """
        action = ET.Element('ACTION')

        perform = ET.SubElement(action, 'PERFORM')
        perform.text = 'ATTACHDISK'

        params = ET.SubElement(action, 'PARAMS')
        ET.SubElement(params,
                      'STORAGE',
                      {'href': '/storage/%s' % (str(volume.id))})

        target = ET.SubElement(params, 'TARGET')
        target.text = device

        xml = ET.tostring(action)
        url = '/compute/%s/action' % node.id
        resp = self.connection.request(url, method='POST', data=xml)

        return resp.status == httplib.ACCEPTED

    def _do_detach_volume(self, node_id, disk_id):
        # Issue a DETACHDISK action for the given disk of the given node.
        action = ET.Element('ACTION')

        perform = ET.SubElement(action, 'PERFORM')
        perform.text = 'DETACHDISK'

        params = ET.SubElement(action, 'PARAMS')
        ET.SubElement(params,
                      'DISK',
                      {'id': disk_id})

        xml = ET.tostring(action)
        url = '/compute/%s/action' % node_id
        resp = self.connection.request(url, method='POST', data=xml)

        return resp.status == httplib.ACCEPTED

    def detach_volume(self, volume):
        """
        Detach a volume from whichever node it is attached to.

        Scans all nodes for a disk matching the volume's id; returns False
        when no node holds the volume.

        :param volume: Volume to detach.
        :type  volume: :class:`StorageVolume`

        :rtype: ``bool``
        """
        # We need to find the node using this volume
        for node in self.list_nodes():
            if type(node.image) is not list:
                # This node has only one associated image. It is not the one we
                # are after.
                continue

            for disk in node.image:
                if disk.id == volume.id:
                    # Node found. We can now detach the volume
                    disk_id = disk.extra['disk_id']
                    return self._do_detach_volume(node.id, disk_id)

        return False

    def list_volumes(self):
        """
        List all storage volumes.

        :rtype: ``list`` of :class:`StorageVolume`
        """
        return self._to_volumes(self.connection.request('/storage').object)

    def _to_volume(self, storage):
        # Convert a STORAGE XML element into a StorageVolume.
        return StorageVolume(id=storage.findtext('ID'),
                             name=storage.findtext('NAME'),
                             size=int(storage.findtext('SIZE')),
                             driver=self.connection.driver)

    def _to_volumes(self, object):
        # The listing only contains hrefs; fetch each volume's full
        # description individually before converting it.
        volumes = []
        for storage in object.findall('STORAGE'):
            storage_id = storage.attrib['href'].partition('/storage/')[2]

            volumes.append(self._to_volume(
                self.connection.request('/storage/%s' % storage_id).object))

        return volumes
class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.8.
    """

    name = 'OpenNebula (v3.8)'
    # Whether to authenticate with a plain-text password; forwarded to the
    # connection class via _ex_connection_class_kwargs below.
    plain_auth = API_PLAIN_AUTH

    def _to_sizes(self, object):
        """
        Request a list of instance types and convert that list to a list of
        OpenNebulaNodeSize objects.

        Request a list of instance types from the OpenNebula web interface,
        and issue a request to convert each XML object representation of an
        instance type to an OpenNebulaNodeSize object.

        :return: List of instance types.
        :rtype:  ``list`` of :class:`OpenNebulaNodeSize`
        """
        sizes = []
        size_id = 1

        # (attribute name, type to cast to, XML tag alias or None).
        attributes = [('name', str, None), ('ram', int, 'MEMORY'),
                      ('cpu', float, None), ('vcpu', float, None),
                      ('disk', str, None), ('bandwidth', float, None),
                      ('price', float, None)]

        for element in object.findall('INSTANCE_TYPE'):
            # Unlike v3.2, the v3.8 listing only carries names; fetch the
            # full description of each instance type individually.
            element = self.connection.request(
                ('/instance_type/%s') % (element.attrib['name'])).object

            size_kwargs = {'id': size_id, 'driver': self}
            values = self._get_attributes_values(attributes=attributes,
                                                 element=element)
            size_kwargs.update(values)

            size = OpenNebulaNodeSize(**size_kwargs)
            sizes.append(size)

            size_id += 1
        return sizes

    def _ex_connection_class_kwargs(self):
        """
        Set plain_auth as an extra :class:`OpenNebulaConnection_3_8` argument

        :return: ``dict`` of :class:`OpenNebulaConnection_3_8` input arguments
        """
        return {'plain_auth': self.plain_auth}
| apache-2.0 |
mattbernst/polyhartree | adapters/mopac7.py | 1 | 11941 | # -*- coding:utf-8 mode:python; tab-width:4; indent-tabs-mode:nil; py-indent-offset:4 -*-
import hashlib
import uuid
import cpinterface
import geoprep
import sharedutilities
class Mopac7Job(cpinterface.Job):
    """A single MOPAC7 calculation: runs the run_mopac7 wrapper script on a
    prepared input deck and parses energy, heat of formation, and geometry
    from the resulting log file.
    """

    def __init__(self, *args, **kw):
        super(Mopac7Job, self).__init__(*args, **kw)
        self.backend = "mopac7"
        #intended to extract XYZ geometric coordinates from a line like
        #2         C          1.3220     .0000     .0000
        # First element: column types; second: presumably the indexes of the
        # x/y/z columns consumed by extract_geometry -- TODO confirm against
        # cpinterface.
        self.geometry_matcher = ([int, str, float, float, float],
                                 [2, 3, 4])

    def extract_last_energy(self, data, options={}):
        """Get last energy message from log file and store it as self.energy.

        The stored value is the sum of the electronic energy and the
        core-core repulsion, converted from eV to atomic units. If either
        component is missing, self.energy is left unset and a message is
        logged instead.

        :param data: log file contents
        :type data : str
        :param options: ignored
        :type options : dict
        """
        electronic_energy = None
        core_repulsion = None
        for line in data.split("\n"):
            if "ELECTRONIC ENERGY" in line:
                electronic_energy = self.n_number_from_line(line, 0, 1)
            elif "CORE-CORE REPULSION" in line:
                core_repulsion = self.n_number_from_line(line, 0, 1)

        #total energy is the sum of electronic energy and core repulsion
        if electronic_energy is not None and core_repulsion is not None:
            self.energy = self.ev_to_au(electronic_energy + core_repulsion)
            self.log("NOTE: energies from semiempirical methods are not directly comparable to ab initio energies")

        else:
            self.log("Unable to find energy. Electronic energy: {0} Core-core repulsion: {1}".format(electronic_energy, core_repulsion))

    def extract_heat_of_formation(self, data, options={}):
        """Get heat of formation from log file and store it as
        self.heat_of_formation.

        The last HEAT OF FORMATION line wins; the value is converted from
        kcal/mol to atomic units.

        :param data: log file contents
        :type data : str
        :param options: ignored
        :type options : dict
        """
        for line in data.split("\n"):
            if "HEAT OF FORMATION" in line:
                hof = self.n_number_from_line(line, 0, 1)
                self.heat_of_formation = self.kcalm_to_au(hof)

    def run(self, host="localhost", options={}):
        """Run MOPAC7 on the given host using the run_mopac7 script.

        Writes the deck to a uniquely named working directory, executes the
        backend, then parses the log. On a recognized MOPAC failure message
        self.runstate is set to "error"; otherwise energies and geometry
        are extracted and runstate becomes "complete".

        :param host: name of host where job should execute
        :type host : str
        :param options: ignored
        :type options : dict
        """
        run_params = self.get_run_config(host)
        # Unique, short working-directory name (hyphens stripped, truncated)
        # because run_mopac7 cannot handle long paths (see note below).
        workdir = self.backend + "-" + str(uuid.uuid1()).replace('-', '')[:16]
        path = "{0}/{1}/".format(self.tmpdir, workdir)

        # Deck contents hashed into a short, deterministic file name.
        deck_hash = hashlib.sha1(self.deck).hexdigest()[:10]
        dat_file = "{0}.dat".format(deck_hash)
        abs_file = path + dat_file
        self.write_file(self.deck, abs_file, host)

        #N.B.: run_mopac7 does not like long paths!
        base_file = abs_file.split(".dat")[0]
        out_file = "{0}.out".format(base_file)
        rp = {"path" : path, "input" : base_file, "output" : out_file}
        cmd = run_params["cli"].format(**rp)

        stdout, returncode = self.execute(cmd, host, bash_shell=True)
        self.stdout = stdout
        self.logdata = self.read_file(out_file, host)

        # MOPAC reports some failures only in the log text, not via the
        # process return code.
        errors = ["DUE TO PROGRAM BUG", "STOPPED TO AVOID WASTING TIME"]
        for e in errors:
            if e in self.logdata:
                self.runstate = "error"
                return

        self.extract_last_energy(self.logdata)
        self.extract_heat_of_formation(self.logdata)
        self.extract_geometry(self.logdata)
        self.runstate = "complete"
class Mopac7(cpinterface.MolecularCalculator):
    """Calculator front-end for MOPAC7: validates method/element support and
    builds Mopac7Job input decks for energy and geometry-optimization runs.
    """

    def __init__(self, *args, **kw):
        super(Mopac7, self).__init__(*args, **kw)

        # Semiempirical methods MOPAC7 implements.
        self.methods = ["semiempirical:am1", "semiempirical:pm3",
                        "semiempirical:mindo/3", "semiempirical:mndo"]
        # Accepted geometry input formats and electronic references.
        self.coordinate_choices = ["cartesian", "zmatrix"]
        self.references = ["RHF", "UHF"]

    def check_element_support(self, system, method):
        """Check that the chosen semiempirical method is parameterized for
        all the elements in the system. Supported elements are taken from
        section 3.5 of the Mopac7 manual. Unsupported elements will raise
        an exception.

        Note that MINDO/3 is supported only for certain *pairs* of elements,
        and this check may let bad pairs slip through because it is not
        pair-aware.

        :param system: molecular system
        :type system : geoprep.System
        :param method: name of method
        :type method : str
        :return: elements from system
        """
        emap = {"semiempirical:mndo" : ["H", "Li", "B", "C", "N", "O", "F",
                                        "Al", "Si", "P", "S", "Cl", "Zn",
                                        "Ge", "Br", "Sn", "I", "Hg", "Pb"],
                "semiempirical:am1" : ["H", "B", "C", "N", "O", "F", "Al",
                                       "Si", "P", "S", "Cl", "Zn", "Ge", "Br",
                                       "Sn", "I", "Hg"],
                "semiempirical:pm3" : ["H", "Be", "C", "N", "O", "F", "Mg",
                                       "Al", "Si", "P", "S", "Cl", "Zn", "Ga",
                                       "Ge", "As", "Se", "Br", "Cd", "In",
                                       "Sn", "Sb", "Te", "I", "Hg", "Tl", "Pb",
                                       "Bi"],
                "semiempirical:mindo/3" : ["H", "B", "C", "N", "O", "F", "Si",
                                           "P", "S", "Cl"]}
        elements = system.elements
        allowed = emap[method]
        for e in elements:
            if e not in allowed:
                raise ValueError("Element {0} not parameterized for {1}".format(repr(e), repr(method)))

        return elements

    def check_coordinates(self, coordinate_choice):
        """Validate a coordinate-system name against the supported choices.

        :param coordinate_choice: "cartesian" or "zmatrix"
        :type coordinate_choice : str
        :return: the validated choice
        :rtype : str
        """
        if coordinate_choice in self.coordinate_choices:
            return coordinate_choice

        else:
            raise ValueError("Unrecognized coordinates option {0}".format(repr(coordinate_choice)))

    def create_geometry(self, system, options={}):
        """Create input geometry for a subsequent calculation.

        options:
         coordinates: "cartesian" or "zmatrix"

        :param system: molecular system data to convert to input geometry
        :type system : geoprep.System
        :param options: select coordinate system
        :type options : dict
        :return: a Mopac7 input with geometry specifications
        :rtype : str
        """
        defaults = {"coordinates" : "cartesian"}
        # Python 2 dict-merge idiom: options entries override defaults.
        options = dict(defaults.items() + options.items())

        coord_choice = self.check_coordinates(options.get("coordinates"))

        # "mopcrt"/"mopin" are the Open Babel format names for MOPAC
        # cartesian and internal (z-matrix) inputs.
        if coord_choice == "cartesian":
            geometry = system.write("mopcrt")

        elif coord_choice == "zmatrix":
            geometry = system.write("mopin")

        return geometry

    def make_energy_job(self, system, method, options={}):
        """Create an input specification for a single point energy calculation.

        :param system: molecular system for energy calculation
        :type system : geoprep.System
        :param method: calculation method
        :type method : str
        :param options: additional keyword based control options
        :type options : dict
        :return: a Mopac7 single point energy calculation job
        :rtype : cpinterface.Job
        """
        system = self.fragment_to_system(system)

        if method.startswith("semiempirical"):
            return self.make_semiempirical_job(system, method, "ENERGY",
                                               options=options)
        else:
            raise ValueError("Mopac7 does not support {0}".format(method))

    def make_opt_job(self, system, method, options={}):
        """Create an input specification for a geometry optimization
        calculation. Optimization goal may be to find minimum geometry or
        to find a saddle point.

        options:
         goal: minimize or saddle

        :param system: molecular system for energy calculation
        :type system : geoprep.System
        :param method: calculation method
        :type method : str
        :param options: additional keyword based control options
        :type options : dict
        :return: a Mopac7 input for geometry optimization calculation
        :rtype : str
        """
        system = self.fragment_to_system(system)

        if method.startswith("semiempirical"):
            return self.make_semiempirical_job(system, method, "OPT",
                                               options=options)
        else:
            raise ValueError("Mopac7 does not support {0}".format(method))

    def make_semiempirical_job(self, system, method, runtyp,
                               options={}):
        """Create a semiempirical input specification for a calculation.
        Mopac7 supports MNDO, MINDO/3, AM1, and PM3 methods.

        See Chapter 2 of the Mopac7 manual for keyword details.

        :param system: molecular system for calculation
        :type system : geoprep.System
        :param method: a semiempirical calculation method
        :type method : str
        :param options: additional keyword based control options
        :type options : dict
        :return: a Mopac7 semiempirical job
        :rtype : Job
        """
        defaults = {"reference" : "rhf", "gnorm" : 0.0001, "precise" : True,
                    "let" : True, "scf_iterations" : 999, "geo_ok" : True,
                    "goal" : "minimize"}
        # Python 2 dict-merge idiom: options entries override defaults.
        options = dict(defaults.items() + options.items())

        self.check_method(method)
        self.check_element_support(system, method)

        deck = self.create_geometry(system, options=options)
        semethod = method.split("semiempirical:")[-1].upper()

        #MNDO is default method in Mopac7, so no keyword provided
        mmap = {"MNDO" : "" , "AM1" : "AM1",
                "MINDO/3" : "MINDO3", "PM3" : "PM3"}

        #RHF is default electronic reference in Mopac7, so no keyword provided
        rmap = {"RHF" : "", "UHF" : "UHF"}

        controls = []

        if system.spin > 6:
            raise ValueError("Spin of {0} too large: Mopac7 only works up to sextet".format(system.spin))

        else:
            spin_map = {1 : "SINGLET", 2 : "DOUBLET", 3 : "TRIPLET",
                        4 : "QUARTET", 5 : "QUINTET", 6 : "SEXTET"}
            spin_name = spin_map[system.spin]

        reference = rmap[options["reference"].upper()]
        self.check_electronic_reference(reference or options["reference"].upper())
        # RHF cannot describe open-shell systems; force UHF for spin > 1.
        if system.spin > 1 and reference != "UHF":
            self.log("Forcing UHF for multiplicity {0}".format(system.spin))
            reference = "UHF"

        controls.append(reference)
        controls.append(spin_name)
        controls.append(mmap[semethod])
        controls.append("LARGE")

        #Default is to optimize geometry; "1SCF" gives a single-point energy
        if runtyp == "ENERGY":
            controls.append("1SCF")

        if options.get("gnorm"):
            controls.append("GNORM={0:.5f}".format(options.get("gnorm")))

        if options.get("precise"):
            controls.append("PRECISE")

        if options.get("let"):
            controls.append("LET")

        if options.get("geo_ok"):
            controls.append("GEO-OK")

        if options.get("scf_iterations"):
            controls.append("ITRY={0}".format(options.get("scf_iterations")))

        if system.charge != 0:
            controls.append("CHARGE={0}".format(system.charge))

        # Empty strings (default method/reference) are dropped from the
        # keyword line before substituting it into the deck template.
        keywords = " ".join([c for c in controls if c])
        deck = deck.replace("PUT KEYWORDS HERE", keywords)

        job = Mopac7Job(deck=deck, system=system)
        return job
| gpl-3.0 |
uxbal/python_intro_ga | exercises/mimic.py | 208 | 2995 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it.

  The empty string '' is treated as the word preceding the first word in
  the file, so it always keys the first word.

  Args:
    filename: path of the text file to read.

  Returns:
    dict mapping each word to the list (with duplicates, in order) of
    words that immediately follow it.
  """
  # +++your code here+++
  # LAB(begin solution)
  # Use a context manager so the file is closed even if read() fails
  # (the original left the handle open on error). Also avoid naming the
  # local dict after the function itself.
  word_map = {}
  with open(filename, 'r') as f:
    text = f.read()
  prev = ''
  for word in text.split():
    # setdefault creates the list on first sight of the key.
    word_map.setdefault(prev, []).append(word)
    prev = word
  return word_map
  # LAB(replace solution)
  # return
  # LAB(end solution)
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  Each printed word is chosen at random from the followers of the
  previous word; unknown words fall back to the followers of ''.
  (Python 2: the trailing comma on print suppresses the newline so the
  words come out space-separated.)
  """
  # +++your code here+++
  # LAB(begin solution)
  for unused_i in range(200):
    print word,
    nexts = mimic_dict.get(word)  # Returns None if not found
    if not nexts:
      nexts = mimic_dict['']  # Fallback to '' if not found
    word = random.choice(nexts)
  # The 'unused_' prefix turns off the lint warning about the unused variable.
  # LAB(replace solution)
  # return
  # LAB(end solution)
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print 'usage: ./mimic.py file-to-read'
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
| gpl-2.0 |
jayme-github/beets | test/test_autotag.py | 1 | 36300 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for autotagging functionality.
"""
import os
import shutil
import re
import copy
import _common
from _common import unittest
from beets import autotag
from beets.autotag import match
from beets.autotag.match import Distance
from beets.library import Item
from beets.util import plurality
from beets.autotag import AlbumInfo, TrackInfo
from beets import config
class PluralityTest(unittest.TestCase):
    """Tests for beets.util.plurality and match.current_metadata."""

    def test_plurality_consensus(self):
        objs = [1, 1, 1, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 4)

    def test_plurality_near_consensus(self):
        objs = [1, 1, 2, 1]
        obj, freq = plurality(objs)
        self.assertEqual(obj, 1)
        self.assertEqual(freq, 3)

    def test_plurality_conflict(self):
        objs = [1, 1, 2, 2, 3]
        obj, freq = plurality(objs)
        # A tie between 1 and 2: either may be chosen, each with freq 2.
        # assertIn replaces the deprecated assert_() alias and produces a
        # clearer failure message than assert_(obj in (1, 2)).
        self.assertIn(obj, (1, 2))
        self.assertEqual(freq, 2)

    def test_plurality_empty_sequence_raises_error(self):
        with self.assertRaises(ValueError):
            plurality([])

    def test_current_metadata_finds_pluralities(self):
        items = [Item({'artist': 'The Beetles', 'album': 'The White Album'}),
                 Item({'artist': 'The Beatles', 'album': 'The White Album'}),
                 Item({'artist': 'The Beatles', 'album': 'Teh White Album'})]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        # 2-of-3 majority is a plurality, not a consensus.
        self.assertFalse(consensus['artist'])

    def test_current_metadata_artist_consensus(self):
        items = [Item({'artist': 'The Beatles', 'album': 'The White Album'}),
                 Item({'artist': 'The Beatles', 'album': 'The White Album'}),
                 Item({'artist': 'The Beatles', 'album': 'Teh White Album'})]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'The Beatles')
        self.assertEqual(likelies['album'], 'The White Album')
        self.assertTrue(consensus['artist'])

    def test_albumartist_consensus(self):
        # albumartist, when present, overrides per-track artists.
        items = [Item({'artist': 'tartist1', 'album': 'album',
                       'albumartist': 'aartist'}),
                 Item({'artist': 'tartist2', 'album': 'album',
                       'albumartist': 'aartist'}),
                 Item({'artist': 'tartist3', 'album': 'album',
                       'albumartist': 'aartist'})]
        likelies, consensus = match.current_metadata(items)
        self.assertEqual(likelies['artist'], 'aartist')
        self.assertFalse(consensus['artist'])

    def test_current_metadata_likelies(self):
        fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
                  'mb_albumid', 'label', 'catalognum', 'country', 'media',
                  'albumdisambig']
        # `i or 1` maps i=0 to 1, so '<field>_1' occurs twice and wins.
        items = [Item(dict((f, '%s_%s' % (f, i or 1)) for f in fields))
                 for i in range(5)]
        likelies, _ = match.current_metadata(items)
        for f in fields:
            self.assertEqual(likelies[f], '%s_1' % f)
def _make_item(title, track, artist=u'some artist'):
    """Build a library Item with fixed album metadata for matching tests."""
    fields = {
        'title': title,
        'track': track,
        'artist': artist,
        'album': u'some album',
        'length': 1,
        'mb_trackid': '',
        'mb_albumid': '',
        'mb_artistid': '',
    }
    return Item(fields)
def _make_trackinfo():
    """Return three one-second TrackInfo stubs by u'some artist'."""
    titles = [u'one', u'two', u'three']
    return [
        TrackInfo(title, None, u'some artist', length=1, index=position)
        for position, title in enumerate(titles, start=1)
    ]
class DistanceTest(unittest.TestCase):
    """Tests for the ``Distance`` penalty accumulator.

    Each ``add_*`` method appends a normalized penalty (0.0-1.0) under a
    category key; the tests inspect the private ``_penalties`` dict
    directly, so assertion order within each test matters.
    """

    def setUp(self):
        # Fresh, empty accumulator for every test.
        self.dist = Distance()

    def test_add(self):
        # add() records a raw penalty under the given key.
        self.dist.add('add', 1.0)
        self.assertEqual(self.dist._penalties, {'add': [1.0]})

    def test_add_equality(self):
        # 0.0 when the value matches one of the options; 1.0 otherwise.
        self.dist.add_equality('equality', 'ghi', ['abc', 'def', 'ghi'])
        self.assertEqual(self.dist._penalties['equality'], [0.0])
        self.dist.add_equality('equality', 'xyz', ['abc', 'def', 'ghi'])
        self.assertEqual(self.dist._penalties['equality'], [0.0, 1.0])
        # Options may also be a compiled regex.
        self.dist.add_equality('equality', 'abc', re.compile(r'ABC', re.I))
        self.assertEqual(self.dist._penalties['equality'], [0.0, 1.0, 0.0])

    def test_add_expr(self):
        # A true expression incurs the full penalty; false incurs none.
        self.dist.add_expr('expr', True)
        self.assertEqual(self.dist._penalties['expr'], [1.0])
        self.dist.add_expr('expr', False)
        self.assertEqual(self.dist._penalties['expr'], [1.0, 0.0])

    def test_add_number(self):
        # Add a full penalty for each number of difference between two numbers.
        self.dist.add_number('number', 1, 1)
        self.assertEqual(self.dist._penalties['number'], [0.0])
        self.dist.add_number('number', 1, 2)
        self.assertEqual(self.dist._penalties['number'], [0.0, 1.0])
        self.dist.add_number('number', 2, 1)
        self.assertEqual(self.dist._penalties['number'], [0.0, 1.0, 1.0])
        # |-1 - 2| = 3 -> three full penalties appended at once.
        self.dist.add_number('number', -1, 2)
        self.assertEqual(self.dist._penalties['number'], [0.0, 1.0, 1.0, 1.0,
                                                          1.0, 1.0])

    def test_add_priority(self):
        # Penalty grows with the value's position in the options list.
        self.dist.add_priority('priority', 'abc', 'abc')
        self.assertEqual(self.dist._penalties['priority'], [0.0])
        self.dist.add_priority('priority', 'def', ['abc', 'def'])
        self.assertEqual(self.dist._penalties['priority'], [0.0, 0.5])
        self.dist.add_priority('priority', 'gh', ['ab', 'cd', 'ef',
                                                  re.compile('GH', re.I)])
        self.assertEqual(self.dist._penalties['priority'], [0.0, 0.5, 0.75])
        # No match at all -> full penalty.
        self.dist.add_priority('priority', 'xyz', ['abc', 'def'])
        self.assertEqual(self.dist._penalties['priority'], [0.0, 0.5, 0.75,
                                                            1.0])

    def test_add_ratio(self):
        # Penalty is number/total, clamped to [0, 1].
        self.dist.add_ratio('ratio', 25, 100)
        self.assertEqual(self.dist._penalties['ratio'], [0.25])
        self.dist.add_ratio('ratio', 10, 5)
        self.assertEqual(self.dist._penalties['ratio'], [0.25, 1.0])
        self.dist.add_ratio('ratio', -5, 5)
        self.assertEqual(self.dist._penalties['ratio'], [0.25, 1.0, 0.0])
        # Zero total must not divide-by-zero; yields 0.0.
        self.dist.add_ratio('ratio', 5, 0)
        self.assertEqual(self.dist._penalties['ratio'], [0.25, 1.0, 0.0, 0.0])

    def test_add_string(self):
        # add_string delegates to match.string_dist.
        dist = match.string_dist(u'abc', u'bcd')
        self.dist.add_string('string', u'abc', u'bcd')
        self.assertEqual(self.dist._penalties['string'], [dist])

    def test_distance(self):
        # Weighted aggregate: note 'media' has no configured weight here.
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        self.dist.add('album', 0.5)
        self.dist.add('media', 0.25)
        self.dist.add('media', 0.75)
        self.assertEqual(self.dist.distance, 0.5)
        # __getitem__()
        self.assertEqual(self.dist['album'], 0.25)
        self.assertEqual(self.dist['media'], 0.25)

    def test_max_distance(self):
        # max_distance is the sum of weights times penalty counts.
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        self.dist.add('album', 0.5)
        self.dist.add('medium', 0.0)
        self.dist.add('medium', 0.0)
        self.assertEqual(self.dist.max_distance, 5.0)

    def test_operators(self):
        # len/iter skip zero-penalty keys; comparisons and arithmetic
        # operate on the float value of the distance.
        config['match']['distance_weights']['source'] = 1.0
        config['match']['distance_weights']['album'] = 2.0
        config['match']['distance_weights']['medium'] = 1.0
        self.dist.add('source', 0.0)
        self.dist.add('album', 0.5)
        self.dist.add('medium', 0.25)
        self.dist.add('medium', 0.75)
        self.assertEqual(len(self.dist), 2)
        self.assertEqual(list(self.dist), [(0.2, 'album'), (0.2, 'medium')])
        self.assertTrue(self.dist == 0.4)
        self.assertTrue(self.dist < 1.0)
        self.assertTrue(self.dist > 0.0)
        self.assertEqual(self.dist - 0.4, 0.0)
        self.assertEqual(0.4 - self.dist, 0.0)
        self.assertEqual(float(self.dist), 0.4)

    def test_raw_distance(self):
        # raw_distance is the unweighted-by-max sum: 3*0.5 + 1*0.75.
        config['match']['distance_weights']['album'] = 3.0
        config['match']['distance_weights']['medium'] = 1.0
        self.dist.add('album', 0.5)
        self.dist.add('medium', 0.25)
        self.dist.add('medium', 0.5)
        self.assertEqual(self.dist.raw_distance, 2.25)

    def test_sorted(self):
        config['match']['distance_weights']['album'] = 4.0
        config['match']['distance_weights']['medium'] = 2.0
        self.dist.add('album', 0.1875)
        self.dist.add('medium', 0.75)
        self.assertEqual(self.dist.sorted, [(0.25, 'medium'), (0.125, 'album')])
        # Sort by key if distance is equal.
        dist = Distance()
        dist.add('album', 0.375)
        dist.add('medium', 0.75)
        self.assertEqual(dist.sorted, [(0.25, 'album'), (0.25, 'medium')])

    def test_update(self):
        # update() merges another Distance's penalty lists per key.
        self.dist.add('album', 0.5)
        self.dist.add('media', 1.0)
        dist = Distance()
        dist.add('album', 0.75)
        dist.add('album', 0.25)
        self.dist.add('media', 0.05)
        self.dist.update(dist)
        self.assertEqual(self.dist._penalties, {'album': [0.5, 0.75, 0.25],
                                                'media': [1.0, 0.05]})
class TrackDistanceTest(unittest.TestCase):
    """Tests for match.track_distance between a library Item and a
    candidate TrackInfo.
    """

    def test_identical_tracks(self):
        item = _make_item(u'one', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)

    def test_different_title(self):
        item = _make_item(u'foo', 1)
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)

    def test_different_artist(self):
        item = _make_item(u'one', 1)
        item.artist = u'foo'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertNotEqual(dist, 0.0)

    def test_various_artists_tolerated(self):
        # A 'Various Artists' item artist must not be penalized even
        # when artist comparison is requested.
        item = _make_item(u'one', 1)
        item.artist = u'Various Artists'
        info = _make_trackinfo()[0]
        dist = match.track_distance(item, info, incl_artist=True)
        self.assertEqual(dist, 0.0)
class AlbumDistanceTest(unittest.TestCase):
    """Tests for match.distance over whole albums (items + AlbumInfo +
    item->track mapping).
    """

    def _mapping(self, items, info):
        # Zip items to tracks positionally; tests construct them in
        # corresponding order.
        out = {}
        for i, t in zip(items, info.tracks):
            out[i] = t
        return out

    def _dist(self, items, info):
        return match.distance(items, info, self._mapping(items, info))

    def test_identical_albums(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        self.assertEqual(self._dist(items, info), 0)

    def test_incomplete_album(self):
        # A missing track adds some distance, but only a little.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        dist = self._dist(items, info)
        self.assertNotEqual(dist, 0)
        # Make sure the distance is not too great
        self.assertTrue(dist < 0.2)

    def test_global_artists_differ(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'someone else',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        self.assertNotEqual(self._dist(items, info), 0)

    def test_comp_track_artists_match(self):
        # On a VA (compilation) release the album artist is ignored as
        # long as per-track artists match.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'should be ignored',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = True,
            album_id = None, artist_id = None,
        )
        self.assertEqual(self._dist(items, info), 0)

    def test_comp_no_track_artists(self):
        # Some VA releases don't have track artists (incomplete metadata).
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'should be ignored',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = True,
            album_id = None, artist_id = None,
        )
        info.tracks[0].artist = None
        info.tracks[1].artist = None
        info.tracks[2].artist = None
        self.assertEqual(self._dist(items, info), 0)

    def test_comp_track_artists_do_not_match(self):
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2, u'someone else'))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = True,
            album_id = None, artist_id = None,
        )
        self.assertNotEqual(self._dist(items, info), 0)

    def test_tracks_out_of_order(self):
        # Swapped titles/positions incur a small but nonzero penalty.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'three', 2))
        items.append(_make_item(u'two', 3))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        dist = self._dist(items, info)
        self.assertTrue(0 < dist < 0.2)

    def test_two_medium_release(self):
        # Track indices continue across media, so global positions match.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 3))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)

    def test_per_medium_track_numbers(self):
        # Item track numbers that restart per medium also match cleanly.
        items = []
        items.append(_make_item(u'one', 1))
        items.append(_make_item(u'two', 2))
        items.append(_make_item(u'three', 1))
        info = AlbumInfo(
            artist = u'some artist',
            album = u'some album',
            tracks = _make_trackinfo(),
            va = False,
            album_id = None, artist_id = None,
        )
        info.tracks[0].medium_index = 1
        info.tracks[1].medium_index = 2
        info.tracks[2].medium_index = 1
        dist = self._dist(items, info)
        self.assertEqual(dist, 0)
def _mkmp3(path):
    """Copy the minimal fixture MP3 from the test resource directory
    to *path*.
    """
    source = os.path.join(_common.RSRC, 'min.mp3')
    shutil.copyfile(source, path)
class AlbumsInDirTest(_common.TestCase):
    """Tests that autotag.albums_in_dir discovers one album per leaf
    directory containing music files.
    """

    def setUp(self):
        super(AlbumsInDirTest, self).setUp()
        # create a directory structure for testing
        self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir'))
        os.mkdir(self.base)
        os.mkdir(os.path.join(self.base, 'album1'))
        os.mkdir(os.path.join(self.base, 'album2'))
        os.mkdir(os.path.join(self.base, 'more'))
        os.mkdir(os.path.join(self.base, 'more', 'album3'))
        os.mkdir(os.path.join(self.base, 'more', 'album4'))
        _mkmp3(os.path.join(self.base, 'album1', 'album1song1.mp3'))
        _mkmp3(os.path.join(self.base, 'album1', 'album1song2.mp3'))
        _mkmp3(os.path.join(self.base, 'album2', 'album2song.mp3'))
        _mkmp3(os.path.join(self.base, 'more', 'album3', 'album3song.mp3'))
        _mkmp3(os.path.join(self.base, 'more', 'album4', 'album4song.mp3'))

    def test_finds_all_albums(self):
        albums = list(autotag.albums_in_dir(self.base))
        self.assertEqual(len(albums), 4)

    def test_separates_contents(self):
        # Each album's items come from exactly one of the four dirs.
        found = []
        for _, album in autotag.albums_in_dir(self.base):
            found.append(re.search(r'album(.)song', album[0].path).group(1))
        self.assertTrue('1' in found)
        self.assertTrue('2' in found)
        self.assertTrue('3' in found)
        self.assertTrue('4' in found)

    def test_finds_multiple_songs(self):
        # album1 contains two files; the rest contain one each.
        for _, album in autotag.albums_in_dir(self.base):
            n = re.search(r'album(.)song', album[0].path).group(1)
            if n == '1':
                self.assertEqual(len(album), 2)
            else:
                self.assertEqual(len(album), 1)
class MultiDiscAlbumsInDirTest(_common.TestCase):
    """Tests that autotag.albums_in_dir coalesces multi-disc directory
    layouts (nested disc subdirectories and flattened "disc N" dirs)
    into single albums.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` throughout (the alias is removed in Python 3.12).
    """

    def setUp(self):
        super(MultiDiscAlbumsInDirTest, self).setUp()
        self.base = os.path.abspath(os.path.join(self.temp_dir, 'tempdir'))
        os.mkdir(self.base)
        self.dirs = [
            # Nested album, multiple subdirs.
            # Also, false positive marker in root dir, and subtitle for disc 3.
            os.path.join(self.base, 'ABCD1234'),
            os.path.join(self.base, 'ABCD1234', 'cd 1'),
            os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus'),
            # Nested album, single subdir.
            # Also, punctuation between marker and disc number.
            os.path.join(self.base, 'album'),
            os.path.join(self.base, 'album', 'cd _ 1'),
            # Flattened album, case typo.
            # Also, false positive marker in parent dir.
            os.path.join(self.base, 'artist [CD5]'),
            os.path.join(self.base, 'artist [CD5]', 'CAT disc 1'),
            os.path.join(self.base, 'artist [CD5]', 'CAt disc 2'),
            # Single disc album, sorted between CAT discs.
            os.path.join(self.base, 'artist [CD5]', 'CATS'),
        ]
        self.files = [
            os.path.join(self.base, 'ABCD1234', 'cd 1', 'song1.mp3'),
            os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song2.mp3'),
            os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song3.mp3'),
            os.path.join(self.base, 'album', 'cd _ 1', 'song4.mp3'),
            os.path.join(self.base, 'artist [CD5]', 'CAT disc 1', 'song5.mp3'),
            os.path.join(self.base, 'artist [CD5]', 'CAt disc 2', 'song6.mp3'),
            os.path.join(self.base, 'artist [CD5]', 'CATS', 'song7.mp3'),
        ]
        for path in self.dirs:
            os.mkdir(path)
        for path in self.files:
            _mkmp3(path)

    def test_coalesce_nested_album_multiple_subdirs(self):
        albums = list(autotag.albums_in_dir(self.base))
        self.assertEqual(len(albums), 4)
        root, items = albums[0]
        self.assertEqual(root, self.dirs[0:3])
        self.assertEqual(len(items), 3)

    def test_coalesce_nested_album_single_subdir(self):
        albums = list(autotag.albums_in_dir(self.base))
        root, items = albums[1]
        self.assertEqual(root, self.dirs[3:5])
        self.assertEqual(len(items), 1)

    def test_coalesce_flattened_album_case_typo(self):
        albums = list(autotag.albums_in_dir(self.base))
        root, items = albums[2]
        self.assertEqual(root, self.dirs[6:8])
        self.assertEqual(len(items), 2)

    def test_single_disc_album(self):
        albums = list(autotag.albums_in_dir(self.base))
        root, items = albums[3]
        self.assertEqual(root, self.dirs[8:])
        self.assertEqual(len(items), 1)

    def test_do_not_yield_empty_album(self):
        # Remove all the MP3s.
        for path in self.files:
            os.remove(path)
        albums = list(autotag.albums_in_dir(self.base))
        self.assertEqual(len(albums), 0)
class AssignmentTest(unittest.TestCase):
    """Tests for match.assign_items: pairing library items with
    candidate tracks, tolerating wrong ordering, missing tracks, and
    extra tracks.

    Fix: ``mapping.iteritems()`` (Python 2 only) replaced with
    ``mapping.items()``, which works on both Python 2 and 3.
    """

    def item(self, title, track):
        # Minimal Item fixture for assignment tests.
        return Item({
            'title': title, 'track': track,
            'mb_trackid': '', 'mb_albumid': '', 'mb_artistid': '',
        })

    def test_reorder_when_track_numbers_incorrect(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 2))
        items.append(self.item(u'two', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        # Titles win over the (incorrect) track numbers.
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })

    def test_order_works_with_invalid_track_numbers(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 1))
        items.append(self.item(u'two', 1))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
            items[2]: trackinfo[1],
        })

    def test_order_works_with_missing_tracks(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'two', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        # The unmatched candidate track is reported as extra.
        self.assertEqual(extra_tracks, [trackinfo[1]])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[1]: trackinfo[2],
        })

    def test_order_works_with_extra_tracks(self):
        items = []
        items.append(self.item(u'one', 1))
        items.append(self.item(u'two', 2))
        items.append(self.item(u'three', 3))
        trackinfo = []
        trackinfo.append(TrackInfo(u'one', None))
        trackinfo.append(TrackInfo(u'three', None))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        # The unmatched library item is reported as extra.
        self.assertEqual(extra_items, [items[1]])
        self.assertEqual(extra_tracks, [])
        self.assertEqual(mapping, {
            items[0]: trackinfo[0],
            items[2]: trackinfo[1],
        })

    def test_order_works_when_track_names_are_entirely_wrong(self):
        # A real-world test case contributed by a user.
        def item(i, length):
            return Item({
                'artist': u'ben harper',
                'album': u'burn to shine',
                'title': u'ben harper - Burn to Shine ' + str(i),
                'track': i,
                'length': length,
                'mb_trackid': '', 'mb_albumid': '', 'mb_artistid': '',
            })
        items = []
        items.append(item(1, 241.37243007106997))
        items.append(item(2, 342.27781704375036))
        items.append(item(3, 245.95070222338137))
        items.append(item(4, 472.87662515485437))
        items.append(item(5, 279.1759535763187))
        items.append(item(6, 270.33333768012))
        items.append(item(7, 247.83435613222923))
        items.append(item(8, 216.54504531525072))
        items.append(item(9, 225.72775379800484))
        items.append(item(10, 317.7643606963552))
        items.append(item(11, 243.57001238834192))
        items.append(item(12, 186.45916150485752))

        def info(index, title, length):
            return TrackInfo(title, None, length=length, index=index)
        trackinfo = []
        trackinfo.append(info(1, u'Alone', 238.893))
        trackinfo.append(info(2, u'The Woman in You', 341.44))
        trackinfo.append(info(3, u'Less', 245.59999999999999))
        trackinfo.append(info(4, u'Two Hands of a Prayer', 470.49299999999999))
        trackinfo.append(info(5, u'Please Bleed', 277.86599999999999))
        trackinfo.append(info(6, u'Suzie Blue', 269.30599999999998))
        trackinfo.append(info(7, u'Steal My Kisses', 245.36000000000001))
        trackinfo.append(info(8, u'Burn to Shine', 214.90600000000001))
        trackinfo.append(info(9, u'Show Me a Little Shame', 224.09299999999999))
        trackinfo.append(info(10, u'Forgiven', 317.19999999999999))
        trackinfo.append(info(11, u'Beloved One', 243.733))
        trackinfo.append(info(12, u'In the Lord\'s Arms', 186.13300000000001))
        mapping, extra_items, extra_tracks = \
            match.assign_items(items, trackinfo)
        self.assertEqual(extra_items, [])
        self.assertEqual(extra_tracks, [])
        # items() instead of the Python-2-only iteritems().
        for item, info in mapping.items():
            self.assertEqual(items.index(item), trackinfo.index(info))
class ApplyTestUtil(object):
    """Mixin providing a helper that applies album metadata to the
    fixture items set up by the concrete test class.
    """

    def _apply(self, info=None, per_disc_numbering=False):
        # Default to the AlbumInfo prepared by the subclass's setUp.
        info = info or self.info
        # Positional item -> track mapping.
        mapping = {}
        for i, t in zip(self.items, info.tracks):
            mapping[i] = t
        config['per_disc_numbering'] = per_disc_numbering
        autotag.apply_metadata(info, mapping)
class ApplyTest(_common.TestCase, ApplyTestUtil):
    """Tests for autotag.apply_metadata on a normal (non-VA), two-medium
    album with two items.

    Fix: ``test_album_artist_overrides_empty_track_artist`` previously
    asserted ``self.items[0]`` twice; the second assertion now checks
    ``self.items[1]`` as intended.
    """

    def setUp(self):
        super(ApplyTest, self).setUp()
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        trackinfo = []
        # Track 1 carries its own credit/sort names; track 2 does not,
        # so it should fall back to the album-level values.
        trackinfo.append(TrackInfo(
            u'oneNew', 'dfa939ec-118c-4d0f-84a0-60f3d1e6522c', medium=1,
            medium_index=1, artist_credit='trackArtistCredit',
            artist_sort='trackArtistSort', index=1,
        ))
        trackinfo.append(TrackInfo(u'twoNew',
                                   '40130ed1-a27c-42fd-a328-1ebefb6caef4',
                                   medium=2, medium_index=1, index=2))
        self.info = AlbumInfo(
            tracks = trackinfo,
            artist = u'artistNew',
            album = u'albumNew',
            album_id = '7edb51cb-77d6-4416-a23c-3a8c2994a2c7',
            artist_id = 'a6623d39-2d8e-4f70-8242-0a9553b91e50',
            artist_credit = u'albumArtistCredit',
            artist_sort = u'albumArtistSort',
            albumtype = u'album',
            va = False,
            mediums = 2,
        )

    def test_titles_applied(self):
        self._apply()
        self.assertEqual(self.items[0].title, 'oneNew')
        self.assertEqual(self.items[1].title, 'twoNew')

    def test_album_and_artist_applied_to_all(self):
        self._apply()
        self.assertEqual(self.items[0].album, 'albumNew')
        self.assertEqual(self.items[1].album, 'albumNew')
        self.assertEqual(self.items[0].artist, 'artistNew')
        self.assertEqual(self.items[1].artist, 'artistNew')

    def test_track_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 2)

    def test_track_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].tracktotal, 2)
        self.assertEqual(self.items[1].tracktotal, 2)

    def test_disc_index_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disc, 1)
        self.assertEqual(self.items[1].disc, 2)

    def test_disc_total_applied(self):
        self._apply()
        self.assertEqual(self.items[0].disctotal, 2)
        self.assertEqual(self.items[1].disctotal, 2)

    def test_per_disc_numbering(self):
        # With per-disc numbering, both tracks are #1 on their medium.
        self._apply(per_disc_numbering=True)
        self.assertEqual(self.items[0].track, 1)
        self.assertEqual(self.items[1].track, 1)

    def test_mb_trackid_applied(self):
        self._apply()
        self.assertEqual(self.items[0].mb_trackid,
                         'dfa939ec-118c-4d0f-84a0-60f3d1e6522c')
        self.assertEqual(self.items[1].mb_trackid,
                         '40130ed1-a27c-42fd-a328-1ebefb6caef4')

    def test_mb_albumid_and_artistid_applied(self):
        self._apply()
        for item in self.items:
            self.assertEqual(item.mb_albumid,
                             '7edb51cb-77d6-4416-a23c-3a8c2994a2c7')
            self.assertEqual(item.mb_artistid,
                             'a6623d39-2d8e-4f70-8242-0a9553b91e50')

    def test_albumtype_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumtype, 'album')
        self.assertEqual(self.items[1].albumtype, 'album')

    def test_album_artist_overrides_empty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artistNew')
        # Fixed: this previously re-checked items[0].
        self.assertEqual(self.items[1].artist, 'artistNew')

    def test_album_artist_overriden_by_nonempty_track_artist(self):
        my_info = copy.deepcopy(self.info)
        my_info.tracks[0].artist = 'artist1!'
        my_info.tracks[1].artist = 'artist2!'
        self._apply(info=my_info)
        self.assertEqual(self.items[0].artist, 'artist1!')
        self.assertEqual(self.items[1].artist, 'artist2!')

    def test_artist_credit_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumartist_credit, 'albumArtistCredit')
        self.assertEqual(self.items[0].artist_credit, 'trackArtistCredit')
        self.assertEqual(self.items[1].albumartist_credit, 'albumArtistCredit')
        # Track 2 has no credit of its own: falls back to the album's.
        self.assertEqual(self.items[1].artist_credit, 'albumArtistCredit')

    def test_artist_sort_applied(self):
        self._apply()
        self.assertEqual(self.items[0].albumartist_sort, 'albumArtistSort')
        self.assertEqual(self.items[0].artist_sort, 'trackArtistSort')
        self.assertEqual(self.items[1].albumartist_sort, 'albumArtistSort')
        # Track 2 has no sort name of its own: falls back to the album's.
        self.assertEqual(self.items[1].artist_sort, 'albumArtistSort')
class ApplyCompilationTest(_common.TestCase, ApplyTestUtil):
    """Tests for autotag.apply_metadata on a compilation-style album
    where each track has its own artist.
    """

    def setUp(self):
        super(ApplyCompilationTest, self).setUp()
        self.items = []
        self.items.append(Item({}))
        self.items.append(Item({}))
        trackinfo = []
        trackinfo.append(TrackInfo(
            u'oneNew',
            'dfa939ec-118c-4d0f-84a0-60f3d1e6522c',
            u'artistOneNew',
            'a05686fc-9db2-4c23-b99e-77f5db3e5282',
            index=1,
        ))
        trackinfo.append(TrackInfo(
            u'twoNew',
            '40130ed1-a27c-42fd-a328-1ebefb6caef4',
            u'artistTwoNew',
            '80b3cf5e-18fe-4c59-98c7-e5bb87210710',
            index=2,
        ))
        # NOTE: va is False here even though the artist is "various";
        # test_va_flag_sets_comp flips it explicitly.
        self.info = AlbumInfo(
            tracks = trackinfo,
            artist = u'variousNew',
            album = u'albumNew',
            album_id = '3b69ea40-39b8-487f-8818-04b6eff8c21a',
            artist_id = '89ad4ac3-39f7-470e-963a-56509c546377',
            albumtype = u'compilation',
            va = False,
        )

    def test_album_and_track_artists_separate(self):
        self._apply()
        self.assertEqual(self.items[0].artist, 'artistOneNew')
        self.assertEqual(self.items[1].artist, 'artistTwoNew')
        self.assertEqual(self.items[0].albumartist, 'variousNew')
        self.assertEqual(self.items[1].albumartist, 'variousNew')

    def test_mb_albumartistid_applied(self):
        self._apply()
        self.assertEqual(self.items[0].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[1].mb_albumartistid,
                         '89ad4ac3-39f7-470e-963a-56509c546377')
        self.assertEqual(self.items[0].mb_artistid,
                         'a05686fc-9db2-4c23-b99e-77f5db3e5282')
        self.assertEqual(self.items[1].mb_artistid,
                         '80b3cf5e-18fe-4c59-98c7-e5bb87210710')

    def test_va_flag_cleared_does_not_set_comp(self):
        self._apply()
        self.assertFalse(self.items[0].comp)
        self.assertFalse(self.items[1].comp)

    def test_va_flag_sets_comp(self):
        va_info = copy.deepcopy(self.info)
        va_info.va = True
        self._apply(info=va_info)
        self.assertTrue(self.items[0].comp)
        self.assertTrue(self.items[1].comp)
class StringDistanceTest(unittest.TestCase):
    """Tests for match.string_dist normalization heuristics.

    Fix: replaced the deprecated ``self.assert_`` alias with
    ``self.assertTrue`` (the alias is removed in Python 3.12).
    """

    def test_equal_strings(self):
        dist = match.string_dist(u'Some String', u'Some String')
        self.assertEqual(dist, 0.0)

    def test_different_strings(self):
        dist = match.string_dist(u'Some String', u'Totally Different')
        self.assertNotEqual(dist, 0.0)

    def test_punctuation_ignored(self):
        dist = match.string_dist(u'Some String', u'Some.String!')
        self.assertEqual(dist, 0.0)

    def test_case_ignored(self):
        dist = match.string_dist(u'Some String', u'sOME sTring')
        self.assertEqual(dist, 0.0)

    def test_leading_the_has_lower_weight(self):
        dist1 = match.string_dist(u'XXX Band Name', u'Band Name')
        dist2 = match.string_dist(u'The Band Name', u'Band Name')
        self.assertTrue(dist2 < dist1)

    def test_parens_have_lower_weight(self):
        dist1 = match.string_dist(u'One .Two.', u'One')
        dist2 = match.string_dist(u'One (Two)', u'One')
        self.assertTrue(dist2 < dist1)

    def test_brackets_have_lower_weight(self):
        dist1 = match.string_dist(u'One .Two.', u'One')
        dist2 = match.string_dist(u'One [Two]', u'One')
        self.assertTrue(dist2 < dist1)

    def test_ep_label_has_zero_weight(self):
        dist = match.string_dist(u'My Song (EP)', u'My Song')
        self.assertEqual(dist, 0.0)

    def test_featured_has_lower_weight(self):
        dist1 = match.string_dist(u'My Song blah Someone', u'My Song')
        dist2 = match.string_dist(u'My Song feat Someone', u'My Song')
        self.assertTrue(dist2 < dist1)

    def test_postfix_the(self):
        dist = match.string_dist(u'The Song Title', u'Song Title, The')
        self.assertEqual(dist, 0.0)

    def test_postfix_a(self):
        dist = match.string_dist(u'A Song Title', u'Song Title, A')
        self.assertEqual(dist, 0.0)

    def test_postfix_an(self):
        dist = match.string_dist(u'An Album Title', u'Album Title, An')
        self.assertEqual(dist, 0.0)

    def test_empty_strings(self):
        dist = match.string_dist(u'', u'')
        self.assertEqual(dist, 0.0)

    def test_solo_pattern(self):
        # Just make sure these don't crash.
        match.string_dist(u'The ', u'')
        match.string_dist(u'(EP)', u'(EP)')
        match.string_dist(u', An', u'')

    def test_heuristic_does_not_harm_distance(self):
        dist = match.string_dist(u'Untitled', u'[Untitled]')
        self.assertEqual(dist, 0.0)

    def test_ampersand_expansion(self):
        dist = match.string_dist(u'And', u'&')
        self.assertEqual(dist, 0.0)

    def test_accented_characters(self):
        dist = match.string_dist(u'\xe9\xe1\xf1', u'ean')
        self.assertEqual(dist, 0.0)
def suite():
    """Build the test suite for this module (used as unittest's
    ``defaultTest`` entry point).
    """
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| mit |
scholer/cadnano2.5 | cadnano/fileio/lattice.py | 2 | 17050 | """
"""
from math import ceil, floor, sqrt
import random
from typing import (
List,
Tuple
)
from cadnano.cntypes import (
Vec2T
)
root3 = 1.732051
class HoneycombDnaPart(object):
    """Static geometry helpers for the honeycomb lattice: parity tests
    and conversions between (row, column) lattice coordinates and x/y
    positions in the model frame (y up) and the Qt frame (y down).

    Legacy crossover lookup tables, kept for reference:

    SCAF_LOW = [[1, 11], [8, 18], [4, 15]]
    SCAF_HIGH = [[2, 12], [9, 19], [5, 16]]
    STAP_LOW = [[6, 16], [3, 13], [10, 20]]
    STAP_HIGH = [[7, 17], [4, 14], [0, 11]]

    # from 0: DR U DL aka 210 90 330
    SCAF_LOW = [[1, 12], [8, 19], [5, 15]]
    SCAF_HIGH = [[2, 13], [9, 20], [6, 16]]
    STAP_LOW = [[17], [3], [10]]
    STAP_HIGH = [[18], [4], [11]]
    """
    STEP = 21  # bases per repeat; 32 in square
    TURNS_PER_STEP = 2.0
    HELICAL_PITCH = STEP/TURNS_PER_STEP
    TWIST_PER_BASE = 360./HELICAL_PITCH  # degrees
    TWIST_OFFSET = -(360./10.5)*1.0  # degrees
    SUB_STEP_SIZE = STEP/3.

    # Manually tuned grid offsets
    PAD_GRID_XL = -100
    PAD_GRID_XH = 70
    PAD_GRID_YL = -150
    PAD_GRID_YH = 105

    @staticmethod
    def isEvenParity(row: int, column: int) -> bool:
        """Return True if ``row`` and ``column`` are both even or both odd."""
        return (row % 2) == (column % 2)
    # end def

    @staticmethod
    def isOddParity(row: int, column: int) -> bool:
        """Return 1 (truthy) if exactly one of ``row``/``column`` is odd,
        else 0.  Note the return value is an int, not a bool.
        """
        return (row % 2) ^ (column % 2)
    # end def

    @staticmethod
    def distanceFromClosestLatticeCoord(x: float, y: float,
                                        radius: float,
                                        scale_factor: float = 1.0) -> Vec2T:
        """Given a x and y position, determine closest lattice coordinate
        and the distance to the center of those coordinates.

        NOTE(review): the argument order here is (x, y, radius) while
        SquareDnaPart.distanceFromClosestLatticeCoord takes (radius, x, y)
        — confirm callers before unifying the two signatures.

        Returns:
            Tuple ``(shortest_distance, (row, column))``.
        """
        column_guess = x/(radius*root3)
        row_guess = -(y - radius*2)/(radius*3)
        possible_columns = (floor(column_guess), ceil(column_guess))
        possible_rows = (floor(row_guess), ceil(row_guess))
        best_guess = None
        shortest_distance = float('inf')
        # Check the four surrounding candidates; a strict '<' keeps the
        # first (lowest row, then lowest column) candidate on ties.
        for row in possible_rows:
            for column in possible_columns:
                guess_x, guess_y = HoneycombDnaPart.latticeCoordToQtXY(radius, row, column, scale_factor)
                squared_distance = abs(guess_x-x)**2 + abs(guess_y-y)**2
                distance = sqrt(squared_distance)
                if distance < shortest_distance:
                    best_guess = (row, column)
                    shortest_distance = distance
        return (shortest_distance, best_guess)
    # end def

    @staticmethod
    def legacyLatticeCoordToPositionXY(radius: float,
                                       row: int, column: int,
                                       scale_factor: float = 1.0) -> Vec2T:
        """Convert legacy (cadnano2) row,column coordinates to latticeXY.

        Unlike :meth:`latticeCoordToModelXY`, rows grow in -y here.
        """
        x = column*radius*root3
        y_offset = radius
        if HoneycombDnaPart.isEvenParity(row, column):
            y = -row*radius*3. + radius + y_offset
        else:
            y = -row*radius*3. + y_offset
        return scale_factor*x, scale_factor*y
    # end def

    @staticmethod
    def latticeCoordToModelXY(radius: float,
                              row: int, column: int,
                              scale_factor: float = 1.0) -> Vec2T:
        """Convert row, column coordinates to latticeXY in the model
        frame (rows grow in +y; even-parity helices sit one radius higher).
        """
        x = column*radius*root3*scale_factor
        y_offset = radius
        if HoneycombDnaPart.isEvenParity(row, column):
            y = (row*radius*3. + radius + y_offset)*scale_factor
        else:
            y = (row*radius*3. + y_offset)*scale_factor
        return x, y
    # end def

    @staticmethod
    def latticeCoordToQtXY(radius: float,
                           row: int, column: int,
                           scale_factor: float = 1.0) -> Vec2T:
        """Call :meth:`HoneycombDnaPart.latticeCoordToModelXY` with the
        supplied row parameter inverted (i.e. multiplied by -1) to
        reflect the coordinates used by Qt (y grows downward).

        Args:
            radius: the model radius
            row: the row in question
            column: the column in question
            scale_factor: the scale factor to be used in the calculations

        Returns:
            The x, y coordinates of the given row and column
        """
        return HoneycombDnaPart.latticeCoordToModelXY(radius, -row, column, scale_factor)
    # end def

    @staticmethod
    def positionModelToLatticeCoord(radius: float,
                                    x: float, y: float,
                                    scale_factor: float = 1.0,
                                    strict: bool = False) -> Tuple[int, int]:
        """Convert a model position to a lattice coordinate.

        When ``strict`` is True, return None unless (x, y) falls inside
        the helix circle centered on the computed coordinate.
        """
        assert isinstance(radius, float)
        assert isinstance(x, float)
        assert isinstance(y, float)
        assert isinstance(scale_factor, float)
        assert isinstance(strict, bool)
        float_column = x/(radius*root3*scale_factor) + 0.5
        # int() truncates toward zero, so shift negatives down to floor.
        column = int(float_column) if float_column >= 0 else int(float_column - 1)
        row_temp = y/(radius*scale_factor)
        if (row_temp % 3) > 1.0:  # odd parity
            float_row = (y-radius)/(scale_factor*radius*3) + radius
        else:  # even parity
            float_row = y/(scale_factor*radius*3) + radius
        row = int(float_row) if float_row >= 0 else int(float_row - 1)
        if not strict:
            return row, column
        else:
            gridpoint_center_x, gridpoint_center_y = HoneycombDnaPart.latticeCoordToQtXY(radius,
                                                                                         row,
                                                                                         column,
                                                                                         scale_factor)
            if abs(x-gridpoint_center_x)**2 + abs(y+gridpoint_center_y)**2 >= (radius*scale_factor)**2:
                return None
            else:
                return row, column
    # end def

    @staticmethod
    def positionQtToLatticeCoord(radius: float,
                                 x: float, y: float,
                                 scale_factor: float = 1.0,
                                 strict: bool = False) -> Tuple[int, int]:
        """Convert a Qt position to a lattice coordinate (y inverted
        relative to the model frame).
        """
        return HoneycombDnaPart.positionModelToLatticeCoord(radius, x, -y, scale_factor, strict)
    # end def

    @staticmethod
    def positionToLatticeCoordRound(radius: float,
                                    x: float, y: float,
                                    round_up_row: bool, round_up_col: bool,
                                    scale_factor: float = 1.0) -> Vec2T:
        """Convert a model position to a rounded lattice coordinate,
        rounding each axis up or down as requested.
        """
        roundRow = ceil if round_up_row else floor
        roundCol = ceil if round_up_col else floor
        column = roundCol(x/(radius*root3*scale_factor))
        row_temp = y/(radius*scale_factor)
        if (row_temp % 3) + 0.5 > 1.0:  # odd parity
            row = roundRow((row_temp - 1)/3.)
        else:  # even parity
            row = roundRow(row_temp/3.)
        return row, column
    # end def

    @staticmethod
    def isInLatticeCoord(radius_tuple: Tuple[float, float],
                         xy_tuple: Tuple[float, float],
                         coordinate_tuple: Tuple[int, int],
                         scale_factor: float) -> bool:
        """Determine if given x-y coordinates are inside a VH at a given
        row-column coordinate.

        ``radius_tuple`` is (part_radius, item_radius); the item radius
        bounds the hit circle.  Returns False for None inputs.
        """
        if xy_tuple is None or coordinate_tuple is None:
            return False
        # '== 2' instead of 'is 2': identity comparison with an int
        # literal is a SyntaxWarning on CPython >= 3.8 and only worked
        # by way of small-int caching.
        assert isinstance(radius_tuple, tuple) and len(radius_tuple) == 2
        assert isinstance(xy_tuple, tuple) and len(xy_tuple) == 2 and all(isinstance(i, float) for i in xy_tuple)
        assert isinstance(coordinate_tuple, tuple) and len(coordinate_tuple) == 2 and all(isinstance(i, int) for i in coordinate_tuple)
        assert isinstance(scale_factor, float)
        part_radius, item_radius = radius_tuple
        row, column = coordinate_tuple
        x, y = xy_tuple
        row_x, row_y = HoneycombDnaPart.latticeCoordToQtXY(part_radius,
                                                           row,
                                                           column,
                                                           scale_factor)
        return abs(row_x - x)**2 + abs(row_y - y)**2 <= item_radius**2
    # end def

    @staticmethod
    def sanityCheckCalculations(iterations: int = 100000000):
        """Ensure that the values returned by latticeCoordToQtXY and
        positionQtToLatticeCoord return consistent results.

        Debug utility; not called in normal operation.
        """
        for _ in range(iterations):
            radius = 1.125
            scale_factor = 13.333333333333334
            row = random.randint(-1000, 1000)
            col = random.randint(-1000, 1000)
            x_position, y_position = HoneycombDnaPart.latticeCoordToQtXY(radius, row, col, scale_factor)
            output_row, output_column = HoneycombDnaPart.positionQtToLatticeCoord(radius, x_position, y_position,
                                                                                  scale_factor)
            assert row == output_row, '''
            Rows do not match:  %s != %s.
            Inputs:
                radius          %s
                scale factor    %s
                row             %s
                column          %s
            ''' % (row, output_row, radius, scale_factor, row, col)
            assert col == output_column, '''
            Rows do not match:  %s != %s.
            Inputs:
                radius          %s
                scale factor    %s
                row             %s
                column          %s
            ''' % (col, output_column, radius, scale_factor, row, col)
# end class
class SquareDnaPart(object):
    """Geometry helpers for the square lattice.

    Crossover position tables (kept from the original source for reference):

    SCAF_LOW = [[4, 26, 15], [18, 28, 7], [10, 20, 31], [2, 12, 23]]
    SCAF_HIGH = [[5, 27, 16], [19, 29, 8], [11, 21, 0], [3, 13, 24]]
    STAP_LOW = [[31], [23], [15], [7]]
    STAP_HIGH = [[0], [24], [16], [8]]
    """
    STEP = 32  # 21 in honeycomb
    SUB_STEP_SIZE = STEP/4
    TURNS_PER_STEP = 3.0
    HELICAL_PITCH = STEP/TURNS_PER_STEP
    TWIST_PER_BASE = 360./HELICAL_PITCH  # degrees
    TWIST_OFFSET = 180. + TWIST_PER_BASE/2  # degrees
    # Manually tuned grid offsets
    PAD_GRID_XL = -80
    PAD_GRID_XH = 80
    PAD_GRID_YL = -80
    PAD_GRID_YH = 80

    @staticmethod
    def isEvenParity(row: int, column: int) -> bool:
        """Return True if the given row and column have even parity."""
        return (row % 2) == (column % 2)
    # end def

    @staticmethod
    def isOddParity(row: int, column: int) -> bool:
        """Return a truthy value if the given row and column have odd parity."""
        return (row % 2) ^ (column % 2)
    # end def

    @staticmethod
    def distanceFromClosestLatticeCoord(radius: float,
                                        x: float, y: float,
                                        scale_factor: float = 1.0) -> Vec2T:
        """
        Given an x and y position, determine the closest lattice coordinate
        and the distance to the center of that coordinate.

        Returns:
            Tuple of (distance, (row, column)) for the best match among the
            four surrounding candidate grid points.
        """
        column_guess = x/(2*radius)
        row_guess = y/(2*radius)
        possible_columns = (floor(column_guess), ceil(column_guess))
        possible_rows = (floor(row_guess), ceil(row_guess))
        best_guess = None
        shortest_distance = float('inf')
        for row in possible_rows:
            for column in possible_columns:
                # NOTE: the candidate is evaluated with the row negated to
                # match the legacy (y-down) coordinate convention.
                guess_x, guess_y = SquareDnaPart.latticeCoordToModelXY(radius, -row, column, scale_factor)
                squared_distance = (guess_x-x)**2 + (-guess_y-y)**2
                distance = sqrt(squared_distance)
                if distance < shortest_distance:
                    best_guess = (row, column)
                    shortest_distance = distance
        return (shortest_distance, best_guess)
    # end def

    @staticmethod
    def legacyLatticeCoordToPositionXY(  radius: float,
                                        row: int, column: int,
                                        scale_factor: float = 1.0) -> Vec2T:
        """Convert legacy row, column coordinates to lattice XY (y negated)."""
        y = -row*2*radius
        x = column*2*radius
        return scale_factor*x, scale_factor*y
    # end def

    @staticmethod
    def latticeCoordToModelXY(  radius: float,
                                row: int, column: int,
                                scale_factor: float = 1.0) -> Vec2T:
        """Convert row, column coordinates to model XY."""
        y = row*2*radius
        x = column*2*radius
        return scale_factor*x, scale_factor*y
    # end def

    @staticmethod
    def latticeCoordToQtXY( radius: float,
                            row: int, column: int,
                            scale_factor: float = 1.0) -> Vec2T:
        """
        Return the Qt x, y coordinates for the given row and column.

        This currently delegates to ``latticeCoordToModelXY`` with the row
        unchanged; the Qt y-axis flip is handled in
        ``positionQtToLatticeCoord`` instead. (The previous docstring
        incorrectly claimed the row was inverted here.)

        Args:
            radius (float): the model radius
            row (int): the row in question
            column (int): the column in question
            scale_factor (float): the scale factor to be used in the calculations

        Returns:
            The x, y coordinates of the given row and column
        """
        return SquareDnaPart.latticeCoordToModelXY(radius, row, column, scale_factor)
    # end def

    @staticmethod
    def positionModelToLatticeCoord(radius: float,
                                    x: float, y: float,
                                    scale_factor: float = 1.0,
                                    strict: bool = False) -> Tuple[int, int]:
        """Convert a model position to a (row, column) lattice coordinate.

        When ``strict`` is True, return ``None`` if the position falls
        outside the circle of the nearest grid point.
        """
        float_row = y/(2.*radius*scale_factor) + 0.5
        float_column = x/(2.*radius*scale_factor) + 0.5
        # int() truncates toward zero, so shift negatives down to emulate floor.
        row = int(float_row) if float_row >= 0 else int(float_row - 1)
        column = int(float_column) if float_column >= 0 else int(float_column - 1)
        if not strict:
            return row, column
        else:
            gridpoint_center_x, gridpoint_center_y = SquareDnaPart.latticeCoordToQtXY(radius,
                                                                                      row,
                                                                                      column,
                                                                                      scale_factor)
            if abs(x-gridpoint_center_x)**2 + abs(y-gridpoint_center_y)**2 >= (radius*scale_factor)**2:
                return None
            else:
                return row, column
    # end def

    @staticmethod
    def positionQtToLatticeCoord( radius: float,
                                  x: float, y: float,
                                  scale_factor: float = 1.0,
                                  strict: bool = False) -> Tuple[int, int]:
        """Convert a Qt position (y-down) to a lattice coordinate."""
        return SquareDnaPart.positionModelToLatticeCoord(radius, x, -y, scale_factor, strict)
    # end def

    @staticmethod
    def positionToLatticeCoordRound(radius: float,
                                    x: float, y: float,
                                    scale_factor: float = 1.0) -> Tuple[int, int]:
        """Convert a model position to the nearest (rounded) lattice coordinate."""
        row = round(y/(2.*radius*scale_factor))
        column = round(x/(2.*radius*scale_factor))
        return row, column
    # end def

    @staticmethod
    def isInLatticeCoord( radius_tuple: Tuple[float, float],
                          xy_tuple: Tuple[float, float],
                          coordinate_tuple: Tuple[int, int],
                          scale_factor: float) -> bool:
        """Determine if given x-y coordinates are inside a VH at a given
        row-column coordinate
        """
        if xy_tuple is None or coordinate_tuple is None:
            return False
        # BUG FIX: these length checks previously used `is 2`, which compares
        # identity, relies on CPython small-int caching, and raises a
        # SyntaxWarning on Python 3.8+.
        assert isinstance(radius_tuple, tuple) and len(radius_tuple) == 2
        assert isinstance(xy_tuple, tuple) and len(xy_tuple) == 2 and all(isinstance(i, float) for i in xy_tuple)
        assert isinstance(coordinate_tuple, tuple) and len(coordinate_tuple) == 2 and all(isinstance(i, int) for i in coordinate_tuple)
        assert isinstance(scale_factor, float)
        part_radius, item_radius = radius_tuple
        row, column = coordinate_tuple
        x, y = xy_tuple
        row_x, row_y = SquareDnaPart.latticeCoordToQtXY(part_radius,
                                                        row,
                                                        column,
                                                        scale_factor)
        return abs(row_x - x)**2 + abs(row_y - y)**2 <= item_radius**2
    # end def
# end class
| mit |
yanchen036/tensorflow | tensorflow/contrib/distribute/python/input_ops.py | 14 | 6158 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input-pipeline utilities for Distribution strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging
# TODO(priyag): Any other reader datasets to consider here?
# Graph op names of filename-driven reader datasets; their presence in a
# pipeline marks where filename-level sharding can be applied.
_READER_DATASET_OPS = [
    "TextLineDataset",
    "TFRecordDataset",
    "FixedLengthRecordDataset"
]
# pylint: disable=protected-access
def auto_shard_dataset(dataset, num_shards, index):
  """Shard the input pipeline by sharding the underlying list of files.

  Args:
    dataset: A `tf.data.Dataset` instance, typically the result of a bunch of
      dataset transformations.
    num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
      shards operating in parallel. Same usage as in `Dataset.shard`.
    index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
      Same usage as in `Dataset.shard`.

  Returns:
    A modified `Dataset` obtained by updating the pipeline sharded by the
    files.

  Raises:
    NotImplementedError: If we cannot automatically determine a good way to
      shard the input dataset.
  """
  # TODO(priyag): Clone datasets instead of updating in place, similar to the
  # clone method for TFRecordDataset.
  def _auto_shard_impl(dataset, found_reader_op):
    """Recursive implementation of auto sharding."""
    if not found_reader_op:
      # TODO(priyag): Make this check more robust by enforcing some common
      # property on reader datasets.
      if (isinstance(dataset, readers.TextLineDataset) or
          isinstance(dataset, readers.FixedLengthRecordDataset)):
        # Shard the file list in place: keep every num_shards-th filename
        # starting at `index`.
        filenames_tensor = dataset._filenames
        num_files = array_ops.size(filenames_tensor)
        sharded_filenames_tensor = array_ops.gather(
            filenames_tensor, math_ops.range(index, num_files, num_shards))
        dataset._filenames = sharded_filenames_tensor
        return dataset
      elif isinstance(dataset, readers.TFRecordDataset):
        # `TFRecordDataset` needs to be handled separately than other readers
        # because it converts filenames to a dataset first. Also, we clone it
        # instead of updating in place because it has special logic in the
        # constructor. Eventually we will change all cases to clone datasets
        # instead of updating in-place.
        return dataset._clone(
            filenames=dataset._filenames.shard(num_shards, index))
      elif hasattr(dataset, "_map_func"):
        # TODO(priyag): Make this check more robust by enforcing some common
        # property on all map/flatmap/interleave datasets.
        map_func_def = dataset._map_func.definition
        for node in map_func_def.node_def:
          if node.op in _READER_DATASET_OPS:
            found_reader_op = True
            break
          elif node.op == "FlatMapDataset":
            # TODO(priyag): Should this check for other map datasets? Should it
            # be recursive? It is too specific to implementation of
            # TFRecordDataset right now.
            nested_func_name = node.attr["f"].func.name
            nested_func = ops.get_default_graph()._functions[nested_func_name]
            for nested_node in nested_func.definition.node_def:
              if nested_node.op in _READER_DATASET_OPS:
                found_reader_op = True
                break
            if found_reader_op:
              break
        if found_reader_op:
          # A reader feeds this map; shard its upstream dataset instead.
          dataset._input_dataset = _auto_shard_impl(
              dataset._input_dataset, found_reader_op)
          return dataset
    # TODO(priyag): Make _input_dataset(s) a common property of all datasets to
    # make this check more robust.
    if hasattr(dataset, "_input_dataset"):
      dataset._input_dataset = _auto_shard_impl(
          dataset._input_dataset, found_reader_op)
      if hasattr(dataset, "_dataset_to_concatenate"):
        # Special case for `ConcatentateDataset`. We want to shard all input
        # datasets.
        dataset._dataset_to_concatenate = _auto_shard_impl(
            dataset._dataset_to_concatenate, found_reader_op)
      return dataset
    if hasattr(dataset, "_datasets"):
      # Special case for `ZipDataset`.
      dataset._datasets = nest.pack_sequence_as(dataset._datasets, [
          _auto_shard_impl(ds, found_reader_op)
          for ds in nest.flatten(dataset._datasets)
      ])
      return dataset
    if not found_reader_op:
      # BUG FIX: the concatenated string fragments previously lacked
      # separating spaces, producing e.g. "pipeline(one of" in the log.
      tf_logging.warn(
          "Could not find a standard reader in the input pipeline "
          "(one of TextLineDataset, TFRecordDataset, FixedLengthRecordDataset). "
          "Falling back to sharding the dataset anyway. Please verify "
          "correctness of auto-sharding for your input.")
    # TODO(priyag): What do we want to do if the number of filenames is
    # uneven in the number of shards? By default, this will just return as
    # many items it can before throwing OutOfRangeError.
    # TODO(priyag): This will shard the filenames before any shuffling of the
    # filename dataset. It might be desirable to shard after shuffling
    # filenames? If so, how do we achieve that?
    return dataset.shard(num_shards, index)

  return _auto_shard_impl(dataset=dataset, found_reader_op=False)
| apache-2.0 |
smartstudy/project_cron | project_cron/utils/processutil.py | 1 | 1524 | import os
from subprocess import Popen, PIPE
from project_cron.utils import logutil
def call(command, cwd=None):
    """Run *command* as a subprocess, logging it first.

    On a non-zero exit status the return code, stdout, and stderr are logged.
    Returns a ``(returncode, stdout, stderr)`` tuple with the streams decoded
    as UTF-8 (undecodable bytes dropped).
    """
    _install_package_if_not_exists(command)
    logutil.info('Execute', ' '.join(command))
    process = Popen(command, stdout=PIPE, stderr=PIPE, env=_get_env(), cwd=cwd)
    raw_out, raw_err = process.communicate()
    out = raw_out.decode('utf8', errors='ignore')
    err = raw_err.decode('utf8', errors='ignore')
    if process.returncode != 0:
        logutil.error('Return Code', process.returncode)
        logutil.error('Output', out)
        logutil.error('Error', err)
    return process.returncode, out, err
def _get_env():
env = os.environ.copy()
env['__PYVENV_LAUNCHER__'] = '/usr/local/bin/python3'
if 'PYTHONHOME' in env:
env.pop('PYTHONHOME')
if 'PYTHONPATH' in env:
env.pop('PYTHONPATH')
env['PATH'] = '/usr/local/bin:/usr/local/sbin:%s' % (env['PATH'] if 'PATH' in env else '')
return env
def _install_package_if_not_exists(command):
    """Install a missing /usr/local/bin executable via Homebrew.

    Does nothing when the binary already exists or lives outside
    /usr/local/bin. Some packages need a different formula name or extra
    build flags, handled by the lookup table below.
    """
    executable = command[0]
    if os.path.exists(executable) or '/usr/local/bin' not in executable:
        return
    package_name = os.path.split(executable)[1]
    # Packages whose brew invocation differs from their binary name.
    special_cases = {
        'ffmpeg': ['ffmpeg', '--with-libass', '--with-faac'],
        'mkvmerge': ['mkvtoolnix'],
    }
    brew_args = special_cases.get(package_name, [package_name])
    Popen(['/usr/local/bin/brew', 'install'] + brew_args,
          env=_get_env()).communicate()
| mit |
inactivist/django-endless-pagination | endless_pagination/paginator.py | 3 | 3565 | from math import ceil
from django.core.paginator import Paginator, Page, EmptyPage, PageNotAnInteger
class CustomPage(Page):
    """Page subclass aware of a paginator whose first page size may differ."""

    def start_index(self):
        """Return the 1-based index of the first object on this page.

        Returns 0 for an empty paginator.
        """
        paginator = self.paginator
        if paginator.count == 0:
            # Special case, return zero if no items.
            return 0
        if self.number == 1:
            return 1
        return (self.number - 2) * paginator.per_page + paginator.first_page + 1

    def end_index(self):
        """Return the 1-based index of the last object on this page."""
        paginator = self.paginator
        if self.number == paginator.num_pages:
            # Special case for the last page because there can be orphans.
            return paginator.count
        return (self.number - 1) * paginator.per_page + paginator.first_page
class BasePaginator(Paginator):
    """Paginator that lets the first page hold a different number of items.

    Accepts an optional ``first_page`` keyword argument; when omitted, the
    first page uses the regular ``per_page`` size.
    """

    def __init__(self, object_list, per_page, **kwargs):
        self.first_page = kwargs.pop("first_page", per_page)
        super(BasePaginator, self).__init__(object_list, per_page, **kwargs)

    def get_current_per_page(self, number):
        """Return the page size in effect for page ``number``."""
        return self.first_page if number == 1 else self.per_page
class DefaultPaginator(BasePaginator):
    """Eager paginator: computes the page count from the known object count.

    Page 1 may hold ``first_page`` items while every later page holds
    ``per_page``; trailing ``orphans`` are folded into the final page.
    """
    def page(self, number):
        """Return the CustomPage for ``number`` (validated first)."""
        number = self.validate_number(number)
        # Page 1 starts at 0; later pages start after the oversized first page.
        bottom = 0 if number == 1 else ((number-2)*self.per_page + self.first_page)
        top = bottom + self.get_current_per_page(number)
        # Fold trailing orphans into this page rather than creating a tiny page.
        if top + self.orphans >= self.count:
            top = self.count
        return CustomPage(self.object_list[bottom:top], number, self)
    def _get_num_pages(self):
        # Lazily compute and cache the page count.
        # NOTE(review): relies on the base Paginator initializing
        # self._num_pages to None -- confirm against the Django version in use.
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Items beyond the first page, each later page holding per_page.
                hits = max(0, self.count - self.orphans - self.first_page)
                self._num_pages = int(ceil(hits / float(self.per_page))) + 1
        return self._num_pages
    num_pages = property(_get_num_pages)
class LazyPaginator(BasePaginator):
    """Paginator that never counts the full object list.

    Each request fetches one page plus ``orphans + 1`` extra items and infers
    from the overshoot whether a next page exists, so ``count`` and
    ``page_range`` are intentionally unsupported.
    """
    def validate_number(self, number):
        # Unlike the default paginator, no upper bound is enforced because
        # the total number of pages is unknown.
        try:
            number = int(number)
        except ValueError:
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        return number
    def page(self, number):
        """Return the CustomPage for ``number``, probing for a next page."""
        number = self.validate_number(number)
        current_per_page = self.get_current_per_page(number)
        # Page 1 starts at 0; later pages start after the oversized first page.
        bottom = 0 if number == 1 else ((number-2)*self.per_page + self.first_page)
        top = bottom + current_per_page
        # get more objects to see if there is a next page
        objects = list(self.object_list[bottom:top + self.orphans + 1])
        objects_count = len(objects)
        if objects_count > (current_per_page + self.orphans):
            # if there is a next page increase the total number of pages
            self._num_pages = number + 1
            # but return only objects for this page
            objects = objects[:current_per_page]
        elif( number != 1) and (objects_count <= self.orphans):
            raise EmptyPage('That page contains no results')
        else:
            # this is the last page
            self._num_pages = number
        return CustomPage(objects, number, self)
    def _get_count(self):
        raise NotImplementedError
    count = property(_get_count)
    def _get_num_pages(self):
        # Only meaningful after page() has been called at least once.
        return self._num_pages
    num_pages = property(_get_num_pages)
    def _get_page_range(self):
        raise NotImplementedError
    page_range = property(_get_page_range)
benob/chainer | chainer/functions/loss/softmax_cross_entropy.py | 2 | 7313 | import numpy
import six
import chainer
from chainer import cuda
from chainer import function
from chainer.utils import type_check
# Bind cuDNN handles and log-softmax kernel constants once at import time.
# These names are only defined when CUDA/cuDNN support is available.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cudnn.cudnn
    _algorithm = libcudnn.CUDNN_SOFTMAX_LOG
    _mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL
    _cudnn_version = libcudnn.getVersion()
def logsumexp(xp, x):
    """Compute ``log(sum(exp(x), axis=1))`` in a numerically stable way.

    ``xp`` is the array module (numpy or cupy); the result keeps axis 1 with
    size one so it broadcasts against ``x``.
    """
    x_max = x.max(axis=1, keepdims=True)
    # Shift by the row max before exponentiating to avoid overflow.
    shifted = xp.exp(x - x_max)
    return xp.log(shifted.sum(axis=1, keepdims=True)) + x_max
def softmax_log(x, use_cudnn):
    """Return the log-softmax of ``x`` along axis 1 (the channel axis)."""
    xp = cuda.get_array_module(x)
    # cuDNN path: GPU array, cuDNN compiled in and requested, cuDNN >= v3.
    if (xp != numpy and cuda.cudnn_enabled and use_cudnn and
            _cudnn_version >= 3000):
        oz_dtype = 'd' if x.dtype == 'd' else 'f'
        one = numpy.array(1, dtype=oz_dtype).ctypes
        zero = numpy.array(0, dtype=oz_dtype).ctypes
        handle = cudnn.get_handle()
        # cuDNN softmax operates on 4-d tensors; flatten trailing dims.
        x_cube = x.reshape(x.shape[:2] + (-1, 1))
        desc = cudnn.create_tensor_descriptor(x_cube)
        y = xp.empty_like(x)
        # LOG softmax over the channel dimension (see module-level constants).
        libcudnn.softmaxForward(
            handle, _algorithm, _mode, one.data, desc.value,
            x_cube.data.ptr, zero.data, desc.value,
            y.data.ptr)
        return y
    else:
        # Fallback: log-softmax via the numerically stable log-sum-exp trick.
        log_z = logsumexp(xp, x)
        return x - log_z
class SoftmaxCrossEntropy(function.Function):

    """Softmax activation followed by a cross entropy loss."""

    # Labels equal to this value are excluded from the loss and gradient.
    ignore_label = -1

    def __init__(self, use_cudnn=True, normalize=True, cache_score=True):
        self.use_cudnn = use_cudnn
        self.normalize = normalize
        self.cache_score = cache_score

    def check_type_forward(self, in_types):
        # x is a float array of scores; t is an int32 label array whose shape
        # matches x with the channel axis (axis 1) removed.
        type_check.expect(in_types.size() == 2)
        x_type, t_type = in_types

        type_check.expect(
            x_type.dtype.kind == 'f',
            t_type.dtype == numpy.int32,
            t_type.ndim == x_type.ndim - 1,

            x_type.shape[0] == t_type.shape[0],
            x_type.shape[2:] == t_type.shape[1:],
        )

    def _check_input_values(self, x, t):
        # Only active in debug mode: every label must be a valid class index
        # or the ignore_label sentinel.
        if not (((0 <= t) &
                 (t < x.shape[1])) |
                (t == self.ignore_label)).all():
            # BUG FIX: message previously read "need to satisfty".
            msg = ('Each label `t` needs to satisfy '
                   '`0 <= t < x.shape[1] or t == %d`' % self.ignore_label)
            raise ValueError(msg)

    def forward_cpu(self, inputs):
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, False)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        # Move the channel axis first and flatten so each column is one
        # sample position; pick the log-probability of each target label.
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)

        log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
        # deal with the case where the SoftmaxCrossEntropy is
        # unpickled from the old version
        if getattr(self, 'normalize', True):
            count = (t != self.ignore_label).sum()
        else:
            count = len(x)
        self._coeff = 1.0 / max(count, 1)

        # Mask out ignored labels before summing.
        y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
            * (-self._coeff)
        return y.reshape(()),

    def forward_gpu(self, inputs):
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            self._check_input_values(x, t)

        log_y = softmax_log(x, self.use_cudnn)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if getattr(self, 'normalize', True):
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        # Fused reduction: sum the picked log-probabilities, skipping ignored
        # labels (t == -1), then scale by -coeff.
        ret = cuda.reduce(
            'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
            't == -1 ? T(0) : log_y[_j * n_channel + t]',
            'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
        )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
        return ret,

    def backward_cpu(self, inputs, grad_outputs):
        x, t = inputs
        gloss = grad_outputs[0]
        # Number of spatial positions per sample (1 when x is 2-d).
        n_unit = t.size // len(t)
        if hasattr(self, 'y'):
            y = self.y.copy()
        else:
            log_y = softmax_log(x, self.use_cudnn)
            y = numpy.exp(log_y)
        # Gradient of softmax cross entropy: y - onehot(t), zeroed for
        # ignored labels.
        if y.ndim == 2:
            gx = y
            gx[six.moves.xrange(len(t)), numpy.maximum(t, 0)] -= 1
            gx *= (t != self.ignore_label).reshape((len(t), 1))
        else:
            # in the case where y.ndim is higher than 2,
            # we think that a current implementation is inefficient
            # because it yields two provisional arrays for indexing.
            gx = y.reshape(y.shape[0], y.shape[1], -1)
            fst_index = numpy.arange(t.size) // n_unit
            trd_index = numpy.arange(t.size) % n_unit
            gx[fst_index, numpy.maximum(t.ravel(), 0), trd_index] -= 1
            gx *= (t != self.ignore_label).reshape((len(t), 1, -1))
            gx = gx.reshape(y.shape)

        gx *= gloss * self._coeff
        return gx, None

    def backward_gpu(self, inputs, grad_outputs):
        cupy = cuda.cupy
        x, t = inputs
        if hasattr(self, 'y'):
            y = self.y
        else:
            y = softmax_log(x, self.use_cudnn)
            cupy.exp(y, out=y)
        gloss = grad_outputs[0]
        n_unit = t.size // len(t)
        coeff = gloss * self._coeff
        # Elementwise kernel computing coeff * (y - onehot(t)) with ignored
        # labels (t == -1) producing zero gradient.
        gx = cuda.elementwise(
            'T y, S t, raw T coeff, S n_channel, S n_unit',
            'T gx',
            '''
               const int c = (i / n_unit % n_channel);
               gx = (t == -1) ? 0 : (coeff[0] * (y - (c == t)));
            ''',
            'softmax_crossent_bwd')(
                y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
        return gx, None
def softmax_cross_entropy(
        x, t, use_cudnn=True, normalize=True, cache_score=True):
    """Computes cross entropy loss for pre-softmax activations.

    Args:
        x (Variable): Variable holding a multidimensional array whose element
            indicates unnormalized log probability: the first axis of the
            variable represents the number of samples, and the second axis
            represents the number of classes. While this function computes
            a usual softmax cross entropy if the number of dimensions is equal
            to 2, it computes a cross entropy of the replicated softmax if the
            number of dimensions is greater than 2.
        t (Variable): Variable holding an int32 vector of ground truth labels.
            If ``t[i] == -1``, corresponding ``x[i]`` is ignored.
        use_cudnn (bool): Whether to use cuDNN for the softmax computation
            when a GPU array is given.
        normalize (bool): If ``True``, this function normalizes the cross
            entropy loss across all instances. If ``False``, it only
            normalizes along a batch size.
        cache_score (bool): When it is ``True``, the function stores result
            of forward computation to use it on backward computation. It
            reduces computational cost though consumes more memory.

    Returns:
        Variable: A variable holding a scalar array of the cross entropy loss.

    .. note::

       This function is differentiable only by ``x``.

    """
    return SoftmaxCrossEntropy(use_cudnn, normalize, cache_score)(x, t)
| mit |
haveal/googleads-python-lib | examples/dfp/v201411/placement_service/get_placements_by_statement.py | 4 | 1866 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all active placements by using a statement.
To create a placement, run create_placements.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """List every ACTIVE placement, paging through DFP PQL result sets."""
  # Initialize appropriate service.
  placement_service = client.GetService('PlacementService', version='v201411')

  # Create a statement to only select active placements.
  values = [{
      'key': 'status',
      'value': {
          'xsi_type': 'TextValue',
          'value': 'ACTIVE'
      }
  }]
  query = 'WHERE status = :status'
  statement = dfp.FilterStatement(query, values)

  # Get placements by statement.
  while True:
    response = placement_service.getPlacementsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for placement in response['results']:
        print ('Placement with id \'%s\' and name \'%s\' was found.'
               % (placement['id'], placement['name']))
      # Advance the statement offset to fetch the next page.
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| apache-2.0 |
drawks/ansible | lib/ansible/plugins/doc_fragments/shell_common.py | 79 | 1833 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):

    # common shell documentation fragment
    DOCUMENTATION = """
options:
  remote_tmp:
    description:
      - Temporary directory to use on targets when executing tasks.
    default: '~/.ansible/tmp'
    env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
    ini:
      - section: defaults
        key: remote_tmp
    vars:
      - name: ansible_remote_tmp
  system_tmpdirs:
    description:
       - "List of valid system temporary directories for Ansible to choose when it cannot use
         ``remote_tmp``, normally due to permission issues.  These must be world readable, writable,
         and executable."
    default: [ /var/tmp, /tmp ]
    type: list
    env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
    ini:
      - section: defaults
        key: system_tmpdirs
    vars:
      - name: ansible_system_tmpdirs
  async_dir:
    description:
       - Directory in which ansible will keep async job information
    default: '~/.ansible_async'
    env: [{name: ANSIBLE_ASYNC_DIR}]
    ini:
      - section: defaults
        key: async_dir
    vars:
      - name: ansible_async_dir
  environment:
    type: dict
    default: {}
    description:
      - dictionary of environment variables and their values to use when executing commands.
  admin_users:
    type: list
    default: ['root', 'toor']
    description:
      - list of users to be expected to have admin privileges. This is used by the controller to
        determine how to share temporary files between the remote user and the become user.
    env:
      - name: ANSIBLE_ADMIN_USERS
    ini:
      - section: defaults
        key: admin_users
    vars:
      - name: ansible_admin_users
"""
atheendra/access_keys | keystone/tests/test_cache.py | 5 | 9945 | # Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from dogpile.cache import api
from dogpile.cache import proxy
from keystone.common import cache
from keystone import config
from keystone import exception
from keystone import tests
# Module-level aliases used throughout the tests.
CONF = config.CONF
NO_VALUE = api.NO_VALUE  # dogpile sentinel representing a cache miss
def _copy_value(value):
if value is not NO_VALUE:
value = copy.deepcopy(value)
return value
# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory
# backend for dogpile.cache in a real deployment under any circumstances. The
# backend does no cleanup of expired values and therefore will leak memory. The
# backend is not implemented in a way to share data across processes (e.g.
# Keystone in HTTPD. This proxy is a hack to get around the lack of isolation
# of values in memory. Currently it blindly stores and retrieves the values
# from the cache, and modifications to dicts/lists/etc returned can result in
# changes to the cached values. In short, do not use the dogpile.cache.memory
# backend unless you are running tests or expecting odd/strange results.
class CacheIsolatingProxy(proxy.ProxyBackend):
    """Proxy that forces a memory copy of stored values.

    The default in-memory cache-region does not perform a copy on values it
    is meant to cache. Therefore if the value is modified after set or after
    get, the cached value also is modified. This proxy does a copy as the last
    thing before storing data.
    """
    def get(self, key):
        cached = self.proxied.get(key)
        return _copy_value(cached)

    def set(self, key, value):
        isolated = _copy_value(value)
        self.proxied.set(key, isolated)
class TestProxy(proxy.ProxyBackend):
    """Proxy that flags TestProxyValue payloads as having come from cache."""

    def get(self, key):
        value = _copy_value(self.proxied.get(key))
        if value is NO_VALUE:
            return value
        if isinstance(value[0], TestProxyValue):
            value[0].cached = True
        return value
class TestProxyValue(object):
    """Wrapper recording whether a value was served through the cache proxy."""
    def __init__(self, value):
        # The wrapped payload.
        self.value = value
        # Set to True by TestProxy.get when retrieved from the cache.
        self.cached = False
class CacheRegionTest(tests.TestCase):
    """Exercise keystone.common.cache region configuration and caching flags."""

    def setUp(self):
        super(CacheRegionTest, self).setUp()
        self.region = cache.make_region()
        cache.configure_cache_region(self.region)
        # TestProxy marks TestProxyValue instances served from the cache.
        self.region.wrap(TestProxy)
        self.test_value = TestProxyValue('Decorator Test')

    def _add_test_caching_option(self):
        # Register the per-section `caching` toggle consulted by
        # cache.should_cache_fn.
        self.config_fixture.register_opt(
            config.config.cfg.BoolOpt('caching', default=True), group='cache')

    def _get_cacheable_function(self):
        # Build an identity function cached through the region, honoring the
        # [cache] section's enable/disable flags.
        SHOULD_CACHE_FN = cache.should_cache_fn('cache')

        @self.region.cache_on_arguments(should_cache_fn=SHOULD_CACHE_FN)
        def cacheable_function(value):
            return value

        return cacheable_function

    def test_region_built_with_proxy_direct_cache_test(self):
        # Verify cache regions are properly built with proxies.
        test_value = TestProxyValue('Direct Cache Test')
        self.region.set('cache_test', test_value)
        cached_value = self.region.get('cache_test')
        self.assertTrue(cached_value.cached)

    def test_cache_region_no_error_multiple_config(self):
        # Verify configuring the CacheRegion again doesn't error.
        cache.configure_cache_region(self.region)
        cache.configure_cache_region(self.region)

    def test_should_cache_fn_global_cache_enabled(self):
        # Verify should_cache_fn generates a sane function for subsystem and
        # functions as expected with caching globally enabled.
        cacheable_function = self._get_cacheable_function()

        self.config_fixture.config(group='cache', enabled=True)
        cacheable_function(self.test_value)
        cached_value = cacheable_function(self.test_value)
        self.assertTrue(cached_value.cached)

    def test_should_cache_fn_global_cache_disabled(self):
        # Verify should_cache_fn generates a sane function for subsystem and
        # functions as expected with caching globally disabled.
        cacheable_function = self._get_cacheable_function()

        self.config_fixture.config(group='cache', enabled=False)
        cacheable_function(self.test_value)
        cached_value = cacheable_function(self.test_value)
        self.assertFalse(cached_value.cached)

    def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self):
        # Verify should_cache_fn generates a sane function for subsystem and
        # functions as expected with caching globally disabled and the specific
        # section caching enabled.
        cacheable_function = self._get_cacheable_function()

        self._add_test_caching_option()
        self.config_fixture.config(group='cache', enabled=False)
        self.config_fixture.config(group='cache', caching=True)

        cacheable_function(self.test_value)
        cached_value = cacheable_function(self.test_value)
        self.assertFalse(cached_value.cached)

    def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self):
        # Verify should_cache_fn generates a sane function for subsystem and
        # functions as expected with caching globally enabled and the specific
        # section caching disabled.
        cacheable_function = self._get_cacheable_function()

        self._add_test_caching_option()
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(group='cache', caching=False)

        cacheable_function(self.test_value)
        cached_value = cacheable_function(self.test_value)
        self.assertFalse(cached_value.cached)

    def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self):
        # Verify should_cache_fn generates a sane function for subsystem and
        # functions as expected with caching globally enabled and the specific
        # section caching enabled.
        cacheable_function = self._get_cacheable_function()

        self._add_test_caching_option()
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(group='cache', caching=True)

        cacheable_function(self.test_value)
        cached_value = cacheable_function(self.test_value)
        self.assertTrue(cached_value.cached)

    def test_cache_dictionary_config_builder(self):
        """Validate we build a sane dogpile.cache dictionary config."""
        self.config_fixture.config(group='cache',
                                   config_prefix='test_prefix',
                                   backend='some_test_backend',
                                   expiration_time=86400,
                                   backend_argument=['arg1:test',
                                                     'arg2:test:test',
                                                     'arg3.invalid'])

        config_dict = cache.build_cache_config()
        self.assertEqual(
            CONF.cache.backend, config_dict['test_prefix.backend'])
        self.assertEqual(
            CONF.cache.expiration_time,
            config_dict['test_prefix.expiration_time'])
        self.assertEqual('test', config_dict['test_prefix.arguments.arg1'])
        self.assertEqual('test:test',
                         config_dict['test_prefix.arguments.arg2'])
        # Malformed backend_argument entries (no ':') must be dropped.
        self.assertFalse('test_prefix.arguments.arg3' in config_dict)

    def test_cache_debug_proxy(self):
        # Round-trip single and multi key operations through the region.
        single_value = 'Test Value'
        single_key = 'testkey'
        multi_values = {'key1': 1, 'key2': 2, 'key3': 3}

        self.region.set(single_key, single_value)
        self.assertEqual(single_value, self.region.get(single_key))

        self.region.delete(single_key)
        self.assertEqual(NO_VALUE, self.region.get(single_key))

        self.region.set_multi(multi_values)
        cached_values = self.region.get_multi(multi_values.keys())
        for value in multi_values.values():
            self.assertIn(value, cached_values)
        self.assertEqual(len(multi_values.values()), len(cached_values))

        self.region.delete_multi(multi_values.keys())
        for value in self.region.get_multi(multi_values.keys()):
            self.assertEqual(NO_VALUE, value)

    def test_configure_non_region_object_raises_error(self):
        self.assertRaises(exception.ValidationError,
                          cache.configure_cache_region,
                          "bogus")
class CacheNoopBackendTest(tests.TestCase):
    """Validate the no-op cache backend accepts writes but stores nothing."""

    def setUp(self):
        super(CacheNoopBackendTest, self).setUp()
        self.region = cache.make_region()
        cache.configure_cache_region(self.region)

    def config_overrides(self):
        # Force the no-op backend for every test in this class.
        super(CacheNoopBackendTest, self).config_overrides()
        self.config_fixture.config(group='cache',
                                   backend='keystone.common.cache.noop')

    def test_noop_backend(self):
        single_value = 'Test Value'
        single_key = 'testkey'
        multi_values = {'key1': 1, 'key2': 2, 'key3': 3}

        # Sets are accepted but every subsequent get is a miss.
        self.region.set(single_key, single_value)
        self.assertEqual(NO_VALUE, self.region.get(single_key))

        self.region.set_multi(multi_values)
        cached_values = self.region.get_multi(multi_values.keys())
        self.assertEqual(len(cached_values), len(multi_values.values()))
        for value in cached_values:
            self.assertEqual(NO_VALUE, value)

        # Delete should not raise exceptions
        self.region.delete(single_key)
        self.region.delete_multi(multi_values.keys())
| apache-2.0 |
sunny-wyb/xen-4.1.2 | dist/install/usr/lib/python2.7/site-packages/xen/sv/DomInfo.py | 46 | 9486 | from xen.xend.XendClient import server
from xen.xend import PrettyPrint
from xen.sv.HTMLBase import HTMLBase
from xen.sv.util import *
from xen.sv.GenTabbed import *
from xen.sv.Wizard import *
DEBUG=1
class DomInfo( GenTabbed ):
def __init__( self, urlWriter ):
self.dom = 0;
GenTabbed.__init__( self, "Domain Info", urlWriter, [ 'General', 'SXP', 'Devices', 'Migrate', 'Save' ], [ DomGeneralTab, DomSXPTab, DomDeviceTab, DomMigrateTab, DomSaveTab ] )
def write_BODY( self, request ):
try:
dom = int( getVar( 'dom', request ) )
except:
request.write( "<p>Please Select a Domain</p>" )
return None
GenTabbed.write_BODY( self, request )
def write_MENU( self, request ):
domains = []
try:
domains = server.xend_domains()
domains.sort()
except:
pass
request.write( "\n<table style='border:0px solid white' cellspacing='0' cellpadding='0' border='0' width='100%'>\n" )
request.write( "<tr class='domainInfoHead'>" )
request.write( "<td class='domainInfoHead' align='center'>Domain</td>\n" )
request.write( "<td class='domainInfoHead' align='center'>Name</td>\n" )
request.write( "<td class='domainInfoHead' align='center'>State</td>\n" )
request.write( "<td class='domainInfoHead' align='center'></td>\n" )
request.write( "</tr>" )
odd = True
if not domains is None:
for domain in domains:
odd = not odd;
if odd:
request.write( "<tr class='domainInfoOdd'>\n" )
else:
request.write( "<tr class='domainInfoEven'>\n" )
domInfo = getDomInfo( domain )
request.write( "<td class='domainInfo' align='center'>%(id)s</td>\n" % domInfo )
url = self.urlWriter( "&dom=%(id)s" % domInfo )
request.write( "<td class='domainInfo' align='center'><a href='%s'>%s</a></td>\n" % ( url, domInfo['name'] ) )
request.write( "<td class='domainInfo' align='center'>%(state)5s</td>\n" % domInfo )
if domInfo[ 'id' ] != "0":
request.write( "<td class='domainInfo' align='center'>" )
if domInfo[ 'state' ][ 2 ] == "-":
request.write( "<img src='images/small-pause.png' onclick='doOp2( \"pause\", \"%(dom)-4s\" )'>" % domInfo )
else:
request.write( "<img src='images/small-unpause.png' onclick='doOp2( \"unpause\", \"%(dom)-4s\" )'>" % domInfo )
request.write( "<img src='images/small-destroy.png' onclick='doOp2( \"destroy\", \"%(dom)-4s\" )'></td>" % domInfo )
else:
request.write( "<td> </td>" )
request.write( "</tr>\n" )
else:
request.write( "<tr colspan='10'><p class='small'>Error getting domain list<br/>Perhaps XenD not running?</p></tr>")
request.write( "</table>" )
class DomGeneralTab( CompositeTab ):
def __init__( self, urlWriter ):
CompositeTab.__init__( self, [ DomGenTab, DomActionTab ], urlWriter )
class DomGenTab( GeneralTab ):
def __init__( self, _ ):
titles = {}
titles[ 'ID' ] = 'dom'
titles[ 'Name' ] = 'name'
titles[ 'CPU' ] = 'cpu'
titles[ 'Memory' ] = ( 'mem', memoryFormatter )
titles[ 'State' ] = ( 'state', stateFormatter )
titles[ 'Total CPU' ] = ( 'cpu_time', smallTimeFormatter )
titles[ 'Up Time' ] = ( 'up_time', bigTimeFormatter )
GeneralTab.__init__( self, {}, titles )
def write_BODY( self, request ):
self.dom = getVar('dom', request)
if self.dom is None:
request.write( "<p>Please Select a Domain</p>" )
return None
self.dict = getDomInfo( self.dom )
GeneralTab.write_BODY( self, request )
class DomSXPTab( PreTab ):
def __init__( self, _ ):
self.dom = 0
PreTab.__init__( self, "" )
def write_BODY( self, request ):
self.dom = getVar('dom', request)
if self.dom is None:
request.write( "<p>Please Select a Domain</p>" )
return None
try:
domInfo = server.xend_domain( self.dom )
except:
domInfo = [["Error getting domain details."]]
self.source = sxp2prettystring( domInfo )
PreTab.write_BODY( self, request )
class DomActionTab( ActionTab ):
def __init__( self, _ ):
actions = { "shutdown" : "Shutdown",
"reboot" : "Reboot",
"pause" : "Pause",
"unpause" : "Unpause",
"destroy" : "Destroy" }
ActionTab.__init__( self, actions )
def op_shutdown( self, request ):
dom = getVar( 'dom', request )
if not dom is None and dom != '0':
if DEBUG: print ">DomShutDown %s" % dom
try:
server.xend_domain_shutdown( int( dom ), "poweroff" )
except:
pass
def op_reboot( self, request ):
dom = getVar( 'dom', request )
if not dom is None and dom != '0':
if DEBUG: print ">DomReboot %s" % dom
try:
server.xend_domain_shutdown( int( dom ), "reboot" )
except:
pass
def op_pause( self, request ):
dom = getVar( 'dom', request )
if not dom is None and dom != '0':
if DEBUG: print ">DomPause %s" % dom
try:
server.xend_domain_pause( int( dom ) )
except:
pass
def op_unpause( self, request ):
dom = getVar( 'dom', request )
if not dom is None and dom != '0':
if DEBUG: print ">DomUnpause %s" % dom
try:
server.xend_domain_unpause( int( dom ) )
except:
pass
def op_destroy( self, request ):
dom = getVar( 'dom', request )
if not dom is None and dom != '0':
if DEBUG: print ">DomDestroy %s" % dom
try:
server.xend_domain_destroy(int( dom ))
except:
pass
class DomDeviceTab( CompositeTab ):
def __init__( self, urlWriter ):
CompositeTab.__init__( self, [ DomDeviceListTab, DomDeviceOptionsTab, DomDeviceActionTab ], urlWriter )
class DomDeviceListTab( NullTab ):
title = "Device List"
def __init__( self, _ ):
pass
class DomDeviceOptionsTab( NullTab ):
title = "Device Options"
def __init__( self, _ ):
pass
class DomDeviceActionTab( ActionTab ):
def __init__( self, _ ):
ActionTab.__init__( self, { "addvcpu" : "Add VCPU", "addvbd" : "Add VBD", "addvif" : "Add VIF" } )
class DomMigrateTab( CompositeTab ):
def __init__( self, urlWriter ):
CompositeTab.__init__( self, [ DomMigrateExtraTab, DomMigrateActionTab ], urlWriter )
class DomMigrateExtraTab( Sheet ):
def __init__( self, urlWriter ):
Sheet.__init__( self, urlWriter, "Configure Migration", 0)
self.addControl( TickControl('live', 'True', 'Live migrate:') )
self.addControl( InputControl('rate', '0', 'Rate limit:') )
self.addControl( InputControl( 'dest', 'host.domain', 'Name or IP address:', ".*") )
class DomMigrateActionTab( ActionTab ):
def __init__( self, _ ):
actions = { "migrate" : "Migrate" }
ActionTab.__init__( self, actions )
def op_migrate( self, request ):
try:
domid = int( getVar( 'dom', request ) )
live = getVar( 'live', request )
rate = getVar( 'rate', request )
dest = getVar( 'dest', request )
dom_sxp = server.xend_domain_migrate( domid, dest, live == 'True', rate )
success = "Your domain was successfully Migrated.\n"
except Exception, e:
success = "There was an error migrating your domain\n"
dom_sxp = str(e)
class DomSaveTab( CompositeTab ):
def __init__( self, urlWriter ):
CompositeTab.__init__( self, [ DomSaveExtraTab, DomSaveActionTab ], urlWriter )
class DomSaveExtraTab( Sheet ):
title = "Save location"
def __init__( self, urlWriter ):
Sheet.__init__( self, urlWriter, "Save Domain to file", 0 )
self.addControl( InputControl( 'file', '', 'Suspend file name:', ".*") )
class DomSaveActionTab( ActionTab ):
def __init__( self, _ ):
actions = { "save" : "Save" }
ActionTab.__init__( self, actions )
def op_save( self, request ):
try:
dom_sxp = server.xend_domain_save( config['domid'], config['file'] )
success = "Your domain was successfully saved.\n"
except Exception, e:
success = "There was an error saving your domain\n"
dom_sxp = str(e)
try:
dom = int( getVar( 'dom', request ) )
except:
pass
| gpl-2.0 |
poondog/kangaroo-m7-mkIII | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
cliqz/socorro | socorro/external/postgresql/base.py | 9 | 15867 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Deprecated by socorro/external/postgresql/service_base.py"""
import contextlib
import logging
import psycopg2
from socorro.external import DatabaseError
from socorro.external.postgresql.dbapi2_util import (
execute_query_fetchall,
single_value_sql
)
logger = logging.getLogger("webapi")
def add_param_to_dict(dictionary, key, value):
"""
Dispatch a list of parameters into a dictionary.
"""
for i, elem in enumerate(value):
dictionary[key + str(i)] = elem
return dictionary
class PostgreSQLBase(object):
"""
Base class for PostgreSQL based service implementations.
"""
def __init__(self, *args, **kwargs):
"""
Store the config and create a connection to the database.
Keyword arguments:
config -- Configuration of the application.
"""
self.context = kwargs.get("config")
try:
self.database = self.context.database_class(
self.context
)
except KeyError:
# some tests seem to put the database config parameters
# into a namespace called 'database', others do not
self.database = self.context.database.database_class(
self.context.database
)
@contextlib.contextmanager
def get_connection(self):
connection = self.database.connection()
try:
yield connection
finally:
connection.close()
def query(self, sql, params=None, error_message=None, connection=None):
"""Return the result of a query executed against PostgreSQL.
Create a connection, open a cursor, execute the query and return the
results. If an error occures, log it and raise a DatabaseError.
Keyword arguments:
sql -- SQL query to execute.
params -- Parameters to merge into the SQL query when executed.
error_message -- Eventual error message to log.
connection -- Optional connection to the database. If none, a new one
will be opened.
"""
return self._execute(
execute_query_fetchall,
sql,
error_message or "Failed to execute query against PostgreSQL",
params=params,
connection=connection
)
def count(self, sql, params=None, error_message=None, connection=None):
"""Return the result of a count SQL query executed against PostgreSQL.
Create a connection, open a cursor, execute the query and return the
result. If an error occures, log it and raise a DatabaseError.
Keyword arguments:
sql -- SQL query to execute.
params -- Parameters to merge into the SQL query when executed.
error_message -- Eventual error message to log.
connection -- Optional connection to the database. If none, a new one
will be opened.
"""
return self._execute(
single_value_sql,
sql,
error_message or "Failed to execute count against PostgreSQL",
params=params,
connection=connection
)
def _execute(
self, actor_function, sql, error_message, params=None, connection=None
):
fresh_connection = False
try:
if not connection:
connection = self.database.connection()
fresh_connection = True
# logger.debug(connection.cursor().mogrify(sql, params))
result = actor_function(connection, sql, params)
connection.commit()
except psycopg2.Error, e:
error_message = "%s - %s" % (error_message, str(e))
logger.error(error_message, exc_info=True)
if connection:
connection.rollback()
raise DatabaseError(error_message)
finally:
if connection and fresh_connection:
connection.close()
return result
@staticmethod
def parse_versions(versions_list, products):
"""
Parses the versions, separating by ":" and returning versions
and products.
"""
versions = []
for v in versions_list:
if v.find(":") > -1:
pv = v.split(":")
versions.append(pv[0])
versions.append(pv[1])
else:
products.append(v)
return (versions, products)
@staticmethod
def prepare_terms(terms, search_mode):
"""
Prepare terms for search, adding '%' where needed,
given the search mode.
"""
if search_mode in ("contains", "starts_with"):
terms = terms.replace("_", "\_").replace("%", "\%")
if search_mode == "contains":
terms = "%" + terms + "%"
elif search_mode == "starts_with":
terms = terms + "%"
return terms
@staticmethod
def dispatch_params(sql_params, key, value):
"""
Dispatch a parameter or a list of parameters into the params array.
"""
if not isinstance(value, list):
sql_params[key] = value
else:
for i, elem in enumerate(value):
sql_params[key + str(i)] = elem
return sql_params
@staticmethod
def build_reports_sql_from(params):
"""
Generate and return the FROM part of the final SQL query.
"""
sql_from = ["FROM reports r"]
## Searching through plugins
if params["report_process"] == "plugin":
sql_from.append(("plugins_reports ON "
"plugins_reports.report_id = r.id"))
sql_from.append(("plugins ON "
"plugins_reports.plugin_id = plugins.id"))
sql_from = " JOIN ".join(sql_from)
return sql_from
@staticmethod
def build_reports_sql_where(params, sql_params, config):
"""Return a string containing the WHERE part of a search-related SQL
query.
"""
if hasattr(config, "webapi"):
config = config.webapi
sql_where = ["""
WHERE r.date_processed BETWEEN %(from_date)s AND %(to_date)s
"""]
sql_params["from_date"] = params["from_date"]
sql_params["to_date"] = params["to_date"]
## Adding terms to where clause
if params["terms"]:
if params["search_mode"] == "is_exactly":
sql_where.append("r.signature=%(term)s")
else:
sql_where.append("r.signature LIKE %(term)s")
sql_params["term"] = params["terms"]
## Adding products to where clause
if params["products"]:
products_list = ["r.product=%(product" + str(x) + ")s"
for x in range(len(params["products"]))]
sql_where.append("(%s)" % (" OR ".join(products_list)))
sql_params = add_param_to_dict(sql_params, "product",
params["products"])
## Adding OS to where clause
if params["os"]:
os_list = ["r.os_name=%(os" + str(x) + ")s"
for x in range(len(params["os"]))]
sql_where.append("(%s)" % (" OR ".join(os_list)))
sql_params = add_param_to_dict(sql_params, "os", params["os"])
## Adding versions to where clause
if params["versions"]:
versions_where = []
version_index = 0
# For each version, get information about it
for i in range(0, len(params["versions"]), 2):
versions_info = params["versions_info"]
product = params["versions"][i]
version = params["versions"][i + 1]
key = "%s:%s" % (product, version)
version_data = None
if key in versions_info:
version_data = versions_info[key]
if version_data and version_data["is_rapid_beta"]:
# If the version is a rapid beta, that means it's an
# alias for a list of other versions. We thus don't filter
# on that version, but on all versions listed in the
# version_data that we have.
# Get all versions that are linked to this rapid beta.
rapid_beta_versions = [
x for x in versions_info
if versions_info[x]["from_beta_version"] == key
and not versions_info[x]["is_rapid_beta"]
]
for rapid_beta in rapid_beta_versions:
versions_where.append(
PostgreSQLBase.build_version_where(
product,
versions_info[rapid_beta]["version_string"],
version_index,
sql_params,
versions_info[rapid_beta],
config
)
)
version_index += 2
else:
# This is a "normal" version, let's filter on it
versions_where.append(
PostgreSQLBase.build_version_where(
product,
version,
version_index,
sql_params,
version_data,
config
)
)
version_index += 2
# Bug 1000160. The logic to convert version numbers to what
# they should actually be has been moved to the processors.
# However, there will be a period of time when we will have
# both the "correct" version number and the "wrong" one in our
# database. We thus need to query for both here.
versions_where.append(
PostgreSQLBase.build_version_where(
product,
version,
version_index,
sql_params,
None,
config
)
)
version_index += 2
if versions_where:
sql_where.append("(%s)" % " OR ".join(versions_where))
## Adding build id to where clause
if params["build_ids"]:
build_ids_list = ["r.build=%(build" + str(x) + ")s"
for x in range(len(params["build_ids"]))]
sql_where.append("(%s)" % (" OR ".join(build_ids_list)))
sql_params = add_param_to_dict(sql_params, "build",
params["build_ids"])
## Adding reason to where clause
if params["reasons"]:
reasons_list = ["r.reason=%(reason" + str(x) + ")s"
for x in range(len(params["reasons"]))]
sql_where.append("(%s)" % (" OR ".join(reasons_list)))
sql_params = add_param_to_dict(sql_params, "reason",
params["reasons"])
## Adding release channels to where clause
if params["release_channels"]:
channels_list = [
"UPPER(r.release_channel)=UPPER(%%(release_channel%s)s)" % x
for x in range(len(params["release_channels"]))
]
sql_where.append("(%s)" % " OR ".join(channels_list))
sql_params = add_param_to_dict(
sql_params,
"release_channel",
params["release_channels"]
)
## Adding report type to where clause
if params["report_type"] == "crash":
sql_where.append("r.hangid IS NULL")
elif params["report_type"] == "hang":
sql_where.append("r.hangid IS NOT NULL")
## Searching through plugins
if params["report_process"] == "plugin":
sql_where.append("r.process_type = 'plugin'")
sql_where.append(("plugins_reports.date_processed BETWEEN "
"%(from_date)s AND %(to_date)s"))
if params["plugin_terms"]:
comp = "="
if params["plugin_search_mode"] in ("contains", "starts_with"):
comp = " LIKE "
sql_where_plugin_in = []
for f in params["plugin_in"]:
if f == "name":
field = "plugins.name"
elif f == "filename":
field = "plugins.filename"
sql_where_plugin_in.append(comp.join((field,
"%(plugin_term)s")))
sql_params["plugin_term"] = params["plugin_terms"]
sql_where.append("(%s)" % " OR ".join(sql_where_plugin_in))
elif params["report_process"] == "browser":
sql_where.append("r.process_type IS NULL")
elif params["report_process"] == "content":
sql_where.append("r.process_type = 'content'")
sql_where = " AND ".join(sql_where)
return (sql_where, sql_params)
@staticmethod
def build_reports_sql_limit(params, sql_params):
"""
"""
sql_limit = """
LIMIT %(limit)s
OFFSET %(offset)s
"""
sql_params["limit"] = params["result_number"]
sql_params["offset"] = params["result_offset"]
return (sql_limit, sql_params)
@staticmethod
def build_version_where(
product,
version,
version_index,
sql_params,
version_data,
config
):
"""Return the content of WHERE of a SQL query for a given version. """
version_where = []
product_param = "version%s" % version_index
version_param = "version%s" % (version_index + 1)
sql_params[product_param] = product
sql_params[version_param] = version
if version_data and version_data["release_channel"]:
# If we have data about that version, and it has a release channel,
# we will want to add some more specific filters to the SQL query.
channel = version_data["release_channel"].lower()
if channel.startswith(tuple(config.non_release_channels)):
# This is a non-release channel.
# Restrict by release_channel.
version_where.append("r.release_channel ILIKE '%s'" % channel)
if (
channel.startswith(tuple(config.restricted_channels)) and
version_data["build_id"]
):
# Restrict to a list of build_id.
builds = ", ".join(
"'%s'" % b for b in version_data["build_id"]
)
version_where.append("r.build IN (%s)" % builds)
else:
# It's a release.
version_where.append((
"r.release_channel NOT IN %s" %
(tuple(config.non_release_channels),)
))
version_where.append("r.product=%%(version%s)s" % version_index)
version_where.append("r.version=%%(version%s)s" % (version_index + 1))
return "(%s)" % " AND ".join(version_where)
| mpl-2.0 |
yongtang/tensorflow | tensorflow/lite/testing/op_tests/zeros_like.py | 17 | 2455 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for zeros_like."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
# This maximum node is so that toco can perform the constants-propagation
# through the above zeros_like, which it can't do if the output of the
# zeros_like as an output of the whole graphs (graph outputs can't be
# constants). If toco does not perform such constants-propagation then
# the resulting tflite graph retains the zeros_like as a Fill op, which
# is unsupported by TFLite, even as a custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 |
yingyun001/ovirt-engine | packaging/setup/plugins/ovirt-engine-common/base/core/reconfigure.py | 8 | 2950 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Reconfigure env plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Reconfigure env plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
after=(
otopicons.Stages.CORE_CONFIG_INIT,
),
)
def _init(self):
self.environment.setdefault(
osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
before=(
osetupcons.Stages.DIALOG_TITLES_S_PRODUCT_OPTIONS,
),
)
def _customization(self):
if self.environment[
osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS
]:
consts = []
for constobj in self.environment[
osetupcons.CoreEnv.SETUP_ATTRS_MODULES
]:
consts.extend(constobj.__dict__['__osetup_attrs__'])
for c in consts:
for k in c.__dict__.values():
if (
hasattr(k, '__osetup_attrs__') and
k.__osetup_attrs__['reconfigurable']
):
k = k.fget(None)
if (
k in self.environment and
# We reset it only if it's disabled.
# Can't currently use this code to
# disable already-enabled components.
not self.environment[k]
):
self.logger.debug(
'Resetting optional component env key {key} '
'old value was {val}'.format(
key=k,
val=self.environment[k],
)
)
self.environment[k] = None
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
bcorbet/SickRage | lib/sqlalchemy/schema.py | 75 | 1103 | # schema.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatiblity namespace for sqlalchemy.sql.schema and related.
"""
from .sql.base import (
SchemaVisitor
)
from .sql.schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
DefaultGenerator,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
SchemaItem,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
_get_table_key,
ColumnCollectionConstraint,
)
from .sql.naming import conv
from .sql.ddl import (
DDL,
CreateTable,
DropTable,
CreateSequence,
DropSequence,
CreateIndex,
DropIndex,
CreateSchema,
DropSchema,
_DropView,
CreateColumn,
AddConstraint,
DropConstraint,
DDLBase,
DDLElement,
_CreateDropBase,
_DDLCompiles
)
| gpl-3.0 |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/__init__.py | 1 | 52369 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-link/tlvs/tlv/adjacency-sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to an Adjacency SID
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__backup",
"__group",
"__sid_type",
"__sid_value",
"__weight",
"__multi_topology_identifier",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__backup = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__group = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__sid_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
self.__sid_value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__multi_topology_identifier = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multi-topology-identifier",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-link",
"tlvs",
"tlv",
"adjacency-sid",
"state",
]
def _get_backup(self):
"""
Getter method for backup, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/backup (boolean)
YANG Description: When this flag is set, it indicates that the adjacency SID refers to
an adjacency which is eligible for protection
"""
return self.__backup
def _set_backup(self, v, load=False):
"""
Setter method for backup, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/backup (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_backup is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_backup() directly.
YANG Description: When this flag is set, it indicates that the adjacency SID refers to
an adjacency which is eligible for protection
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """backup must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__backup = t
if hasattr(self, "_set"):
self._set()
def _unset_backup(self):
self.__backup = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_group(self):
"""
Getter method for group, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/group (boolean)
YANG Description: When this flag is set it indicates that the adjacency SID refers to
a group of adjacencies that have a common value
"""
return self.__group
def _set_group(self, v, load=False):
"""
Setter method for group, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/group (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_group() directly.
YANG Description: When this flag is set it indicates that the adjacency SID refers to
a group of adjacencies that have a common value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """group must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__group = t
if hasattr(self, "_set"):
self._set()
def _unset_group(self):
self.__group = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_sid_type(self):
"""
Getter method for sid_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_type (oc-ospf-types:sr-sid-type)
YANG Description: The type of the value contained within the sub-TLV
"""
return self.__sid_type
def _set_sid_type(self, v, load=False):
"""
Setter method for sid_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_type (oc-ospf-types:sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_type() directly.
YANG Description: The type of the value contained within the sub-TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_type must be of a type compatible with oc-ospf-types:sr-sid-type""",
"defined-type": "oc-ospf-types:sr-sid-type",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LABEL': {}, 'SID': {}},), is_leaf=True, yang_name="sid-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:sr-sid-type', is_config=False)""",
}
)
self.__sid_type = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_type(self):
self.__sid_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
def _get_sid_value(self):
"""
Getter method for sid_value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_value (uint32)
YANG Description: The value of the binding included within the sub-TLV. The type of
this binding is indicated by the type leaf.
"""
return self.__sid_value
def _set_sid_value(self, v, load=False):
"""
Setter method for sid_value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_value() directly.
YANG Description: The value of the binding included within the sub-TLV. The type of
this binding is indicated by the type leaf.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sid-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__sid_value = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_value(self):
self.__sid_value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/weight (uint8)
YANG Description: The weight of the Adjacency SID when used for load-balancing
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: The weight of the Adjacency SID when used for load-balancing
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_multi_topology_identifier(self):
"""
Getter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/multi_topology_identifier (uint8)
YANG Description: The multi-topology identifier with which the adjacency SID is
associated
"""
return self.__multi_topology_identifier
def _set_multi_topology_identifier(self, v, load=False):
"""
Setter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/multi_topology_identifier (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_multi_topology_identifier is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_multi_topology_identifier() directly.
YANG Description: The multi-topology identifier with which the adjacency SID is
associated
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multi-topology-identifier",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """multi_topology_identifier must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multi-topology-identifier", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__multi_topology_identifier = t
if hasattr(self, "_set"):
self._set()
def _unset_multi_topology_identifier(self):
self.__multi_topology_identifier = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multi-topology-identifier",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
backup = __builtin__.property(_get_backup)
group = __builtin__.property(_get_group)
sid_type = __builtin__.property(_get_sid_type)
sid_value = __builtin__.property(_get_sid_value)
weight = __builtin__.property(_get_weight)
multi_topology_identifier = __builtin__.property(_get_multi_topology_identifier)
_pyangbind_elements = OrderedDict(
[
("backup", backup),
("group", group),
("sid_type", sid_type),
("sid_value", sid_value),
("weight", weight),
("multi_topology_identifier", multi_topology_identifier),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-link/tlvs/tlv/adjacency-sid/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to an Adjacency SID
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__backup",
"__group",
"__sid_type",
"__sid_value",
"__weight",
"__multi_topology_identifier",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__backup = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__group = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__sid_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
self.__sid_value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__multi_topology_identifier = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multi-topology-identifier",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"extended-link",
"tlvs",
"tlv",
"adjacency-sid",
"state",
]
def _get_backup(self):
"""
Getter method for backup, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/backup (boolean)
YANG Description: When this flag is set, it indicates that the adjacency SID refers to
an adjacency which is eligible for protection
"""
return self.__backup
def _set_backup(self, v, load=False):
"""
Setter method for backup, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/backup (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_backup is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_backup() directly.
YANG Description: When this flag is set, it indicates that the adjacency SID refers to
an adjacency which is eligible for protection
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """backup must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="backup", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__backup = t
if hasattr(self, "_set"):
self._set()
def _unset_backup(self):
self.__backup = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="backup",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_group(self):
"""
Getter method for group, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/group (boolean)
YANG Description: When this flag is set it indicates that the adjacency SID refers to
a group of adjacencies that have a common value
"""
return self.__group
def _set_group(self, v, load=False):
"""
Setter method for group, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/group (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_group() directly.
YANG Description: When this flag is set it indicates that the adjacency SID refers to
a group of adjacencies that have a common value
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """group must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__group = t
if hasattr(self, "_set"):
self._set()
def _unset_group(self):
self.__group = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="group",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_sid_type(self):
"""
Getter method for sid_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_type (oc-ospf-types:sr-sid-type)
YANG Description: The type of the value contained within the sub-TLV
"""
return self.__sid_type
def _set_sid_type(self, v, load=False):
"""
Setter method for sid_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_type (oc-ospf-types:sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_type() directly.
YANG Description: The type of the value contained within the sub-TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_type must be of a type compatible with oc-ospf-types:sr-sid-type""",
"defined-type": "oc-ospf-types:sr-sid-type",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'LABEL': {}, 'SID': {}},), is_leaf=True, yang_name="sid-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:sr-sid-type', is_config=False)""",
}
)
self.__sid_type = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_type(self):
self.__sid_type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"LABEL": {}, "SID": {}},
),
is_leaf=True,
yang_name="sid-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:sr-sid-type",
is_config=False,
)
def _get_sid_value(self):
"""
Getter method for sid_value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_value (uint32)
YANG Description: The value of the binding included within the sub-TLV. The type of
this binding is indicated by the type leaf.
"""
return self.__sid_value
def _set_sid_value(self, v, load=False):
"""
Setter method for sid_value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/sid_value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_value() directly.
YANG Description: The value of the binding included within the sub-TLV. The type of
this binding is indicated by the type leaf.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid_value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sid-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__sid_value = t
if hasattr(self, "_set"):
self._set()
def _unset_sid_value(self):
self.__sid_value = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sid-value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/weight (uint8)
YANG Description: The weight of the Adjacency SID when used for load-balancing
"""
return self.__weight
def _set_weight(self, v, load=False):
"""
Setter method for weight, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/weight (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_weight is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_weight() directly.
YANG Description: The weight of the Adjacency SID when used for load-balancing
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """weight must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__weight = t
if hasattr(self, "_set"):
self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_multi_topology_identifier(self):
"""
Getter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/multi_topology_identifier (uint8)
YANG Description: The multi-topology identifier with which the adjacency SID is
associated
"""
return self.__multi_topology_identifier
    def _set_multi_topology_identifier(self, v, load=False):
        """
        Setter method for multi_topology_identifier, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_link/tlvs/tlv/adjacency_sid/state/multi_topology_identifier (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_multi_topology_identifier is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_multi_topology_identifier() directly.
        YANG Description: The multi-topology identifier with which the adjacency SID is
        associated
        """
        # Coerce through the value's declared union-type helper, if present.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Wrap the value in the schema-typed dynamic class; construction
            # failures indicate a type/range violation of uint8 (0..255).
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                is_leaf=True,
                yang_name="multi-topology-identifier",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=False,
            )
        except (TypeError, ValueError):
            # pyangbind convention: re-raise as ValueError carrying a
            # structured error dict describing the expected generated type.
            raise ValueError(
                {
                    "error-string": """multi_topology_identifier must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multi-topology-identifier", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__multi_topology_identifier = t
        # Notify the container that a member changed, when supported.
        if hasattr(self, "_set"):
            self._set()
    def _unset_multi_topology_identifier(self):
        """Reset ``multi_topology_identifier`` to a fresh default leaf."""
        # Mirror of the setter's schema arguments, with no initial value.
        self.__multi_topology_identifier = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="multi-topology-identifier",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )
backup = __builtin__.property(_get_backup)
group = __builtin__.property(_get_group)
sid_type = __builtin__.property(_get_sid_type)
sid_value = __builtin__.property(_get_sid_value)
weight = __builtin__.property(_get_weight)
multi_topology_identifier = __builtin__.property(_get_multi_topology_identifier)
_pyangbind_elements = OrderedDict(
[
("backup", backup),
("group", group),
("sid_type", sid_type),
("sid_value", sid_value),
("weight", weight),
("multi_topology_identifier", multi_topology_identifier),
]
)
| apache-2.0 |
jlaunonen/kirppu | kirppu/migrations/0002_add_itemtype.py | 1 | 2253 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Second kirppu migration: introduces the UIText model, adds the
    Item.adult and Item.itemtype fields, grants Clerk an 'oversee'
    permission, and tightens Clerk.access_key validation."""

    # Must be applied on top of the app's initial schema.
    dependencies = [
        ('kirppu', '0001_initial'),
    ]

    operations = [
        # New model for editable UI text snippets, looked up by identifier.
        migrations.CreateModel(
            name='UIText',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('identifier', models.CharField(help_text='Identifier of the textitem', unique=True, max_length=16, blank=True)),
                ('text', models.CharField(help_text='Textitem in UI', max_length=16384)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Adds a custom 'oversee' permission to the Clerk model.
        migrations.AlterModelOptions(
            name='clerk',
            options={'permissions': (('oversee', 'Can perform overseer actions'),)},
        ),
        # Flags items as adult-only; defaults to allowed for all shoppers.
        migrations.AddField(
            model_name='item',
            name='adult',
            field=models.CharField(default=b'no', max_length=8, choices=[(b'yes', 'Item allowed only to adult shoppers, contains porn etc.'), (b'no', 'Item allowed to all shoppers')]),
            preserve_default=True,
        ),
        # Categorizes items by type; defaults to 'other'.
        migrations.AddField(
            model_name='item',
            name='itemtype',
            field=models.CharField(default=b'other', max_length=24, choices=[(b'manga-finnish', 'Finnish manga book'), (b'manga-english', 'English manga book'), (b'manga-other', 'Manga book in another language'), (b'book', 'Non-manga book'), (b'magazine', 'Magazine'), (b'movie-tv', 'Movie or TV-series'), (b'game', 'Game'), (b'figurine-plushie', 'Figurine or a stuffed toy'), (b'clothing', 'Clothing'), (b'other', 'Other item')]),
            preserve_default=True,
        ),
        # Access key, when set, must now be exactly 14 hex characters.
        migrations.AlterField(
            model_name='clerk',
            name='access_key',
            field=models.CharField(null=True, validators=[django.core.validators.RegexValidator(b'^[0-9a-fA-F]{14}$', message=b'Must be 14 hex chars.')], max_length=128, blank=True, help_text='Access code assigned to the clerk. 14 hexlets.', unique=True, verbose_name='Access key value'),
            preserve_default=True,
        ),
    ]
| mit |
doubi-workshop/grpc | src/python/grpcio_test/grpc_test/beta/_beta_features_test.py | 6 | 8586 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Face interface compliance of the gRPC Python Beta API."""
import threading
import unittest
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities
from grpc_test import resources
from grpc_test.beta import test_utilities
from grpc_test.framework.common import test_constants
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
_GROUP = 'group'
_UNARY_UNARY = 'unary-unary'
_UNARY_STREAM = 'unary-stream'
_STREAM_UNARY = 'stream-unary'
_STREAM_STREAM = 'stream-stream'
_REQUEST = b'abc'
_RESPONSE = b'123'
class _Servicer(object):
  """Test service that records the last request and peer for assertions.

  All mutable state is guarded by a condition variable; the statement order
  inside each locked region (record state, toggle compression, mark
  serviced, notify) is intentional and relied upon by waiting test threads.
  """

  def __init__(self):
    self._condition = threading.Condition()
    self._peer = None
    self._serviced = False

  def unary_unary(self, request, context):
    """Unary-unary handler: record peer, disable compression, reply."""
    with self._condition:
      self._request = request
      self._peer = context.protocol_context().peer()
      context.protocol_context().disable_next_response_compression()
      self._serviced = True
      self._condition.notify_all()
      return _RESPONSE

  def unary_stream(self, request, context):
    """Unary-stream handler that produces an empty response stream."""
    with self._condition:
      self._request = request
      self._peer = context.protocol_context().peer()
      context.protocol_context().disable_next_response_compression()
      self._serviced = True
      self._condition.notify_all()
    # 'return' before 'yield' makes this a generator yielding no responses.
    return
    yield

  def stream_unary(self, request_iterator, context):
    """Stream-unary handler: drain requests, then reply once."""
    for request in request_iterator:
      self._request = request
      with self._condition:
        self._peer = context.protocol_context().peer()
        context.protocol_context().disable_next_response_compression()
        self._serviced = True
        self._condition.notify_all()
    return _RESPONSE

  def stream_stream(self, request_iterator, context):
    """Stream-stream handler: one response per request, then mark serviced."""
    for request in request_iterator:
      with self._condition:
        self._peer = context.protocol_context().peer()
        context.protocol_context().disable_next_response_compression()
      yield _RESPONSE
    with self._condition:
      self._serviced = True
      self._condition.notify_all()

  def peer(self):
    """Return the last recorded peer string (None before any RPC)."""
    with self._condition:
      return self._peer

  def block_until_serviced(self):
    """Block the caller until some handler has completed its work."""
    with self._condition:
      while not self._serviced:
        self._condition.wait()
class _BlockingIterator(object):
  """Iterator whose elements are released one-by-one via allow().

  next() blocks until allow() has queued an element from the upstream
  iterator; an exhausted upstream sets the end-of-stream sentinel
  (_allowed is None), terminating iteration. All state is protected by a
  condition variable.
  """

  def __init__(self, upstream):
    self._condition = threading.Condition()
    self._upstream = upstream
    self._allowed = []

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol; blocks until an element is allowed or
    # end-of-stream is signalled.
    with self._condition:
      while True:
        if self._allowed is None:
          raise StopIteration()
        elif self._allowed:
          return self._allowed.pop(0)
        else:
          self._condition.wait()

  def allow(self):
    # Release the next upstream element to any blocked next() caller.
    with self._condition:
      try:
        self._allowed.append(next(self._upstream))
      except StopIteration:
        self._allowed = None
      self._condition.notify_all()
class BetaFeaturesTest(unittest.TestCase):
  """Exercises Beta-API features (call options, compression toggles, peer
  introspection) over a real secure channel against an in-process server."""

  def setUp(self):
    # Wire all four cardinalities of _Servicer into an SSL-secured server
    # on an ephemeral port, then build a dynamic stub against it.
    self._servicer = _Servicer()
    method_implementations = {
        (_GROUP, _UNARY_UNARY):
            utilities.unary_unary_inline(self._servicer.unary_unary),
        (_GROUP, _UNARY_STREAM):
            utilities.unary_stream_inline(self._servicer.unary_stream),
        (_GROUP, _STREAM_UNARY):
            utilities.stream_unary_inline(self._servicer.stream_unary),
        (_GROUP, _STREAM_STREAM):
            utilities.stream_stream_inline(self._servicer.stream_stream),
    }
    cardinalities = {
        _UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
        _UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
        _STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
        _STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
    }
    server_options = implementations.server_options(
        thread_pool_size=test_constants.POOL_SIZE)
    self._server = implementations.server(
        method_implementations, options=server_options)
    server_credentials = implementations.ssl_server_credentials(
        [(resources.private_key(), resources.certificate_chain(),),])
    # Port 0 lets the OS pick a free port; the chosen port is returned.
    port = self._server.add_secure_port('[::]:0', server_credentials)
    self._server.start()
    self._client_credentials = implementations.ssl_client_credentials(
        resources.test_root_certificates(), None, None)
    # Host override makes the test certificate's name check pass.
    channel = test_utilities.not_really_secure_channel(
        'localhost', port, self._client_credentials, _SERVER_HOST_OVERRIDE)
    stub_options = implementations.stub_options(
        thread_pool_size=test_constants.POOL_SIZE)
    self._dynamic_stub = implementations.dynamic_stub(
        channel, _GROUP, cardinalities, options=stub_options)

  def tearDown(self):
    # Drop the stub first, then stop the server and wait for shutdown.
    self._dynamic_stub = None
    self._server.stop(test_constants.SHORT_TIMEOUT).wait()

  def test_unary_unary(self):
    """Unary-unary call with compression disabled and call credentials."""
    call_options = interfaces.grpc_call_options(
        disable_compression=True, credentials=self._client_credentials)
    response = getattr(self._dynamic_stub, _UNARY_UNARY)(
        _REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
    self.assertEqual(_RESPONSE, response)
    self.assertIsNotNone(self._servicer.peer())

  def test_unary_stream(self):
    """Unary-stream call; only checks the peer was observed server-side."""
    call_options = interfaces.grpc_call_options(
        disable_compression=True, credentials=self._client_credentials)
    response_iterator = getattr(self._dynamic_stub, _UNARY_STREAM)(
        _REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
    self._servicer.block_until_serviced()
    self.assertIsNotNone(self._servicer.peer())

  def test_stream_unary(self):
    """Stream-unary future call, toggling request compression per message."""
    call_options = interfaces.grpc_call_options(
        credentials=self._client_credentials)
    request_iterator = _BlockingIterator(iter((_REQUEST,)))
    response_future = getattr(self._dynamic_stub, _STREAM_UNARY).future(
        request_iterator, test_constants.LONG_TIMEOUT,
        protocol_options=call_options)
    # Disable compression before releasing each request to the transport.
    response_future.protocol_context().disable_next_request_compression()
    request_iterator.allow()
    response_future.protocol_context().disable_next_request_compression()
    request_iterator.allow()
    self._servicer.block_until_serviced()
    self.assertIsNotNone(self._servicer.peer())
    self.assertEqual(_RESPONSE, response_future.result())

  def test_stream_stream(self):
    """Stream-stream call, alternating request release and response read."""
    call_options = interfaces.grpc_call_options(
        credentials=self._client_credentials)
    request_iterator = _BlockingIterator(iter((_REQUEST,)))
    response_iterator = getattr(self._dynamic_stub, _STREAM_STREAM)(
        request_iterator, test_constants.SHORT_TIMEOUT,
        protocol_options=call_options)
    response_iterator.protocol_context().disable_next_request_compression()
    request_iterator.allow()
    response = next(response_iterator)
    response_iterator.protocol_context().disable_next_request_compression()
    request_iterator.allow()
    self._servicer.block_until_serviced()
    self.assertIsNotNone(self._servicer.peer())
    self.assertEqual(_RESPONSE, response)
if __name__ == '__main__':
  # Run all tests with per-test result lines when executed directly.
  unittest.main(verbosity=2)
| bsd-3-clause |
samuknet/servo | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/_base.py | 652 | 6143 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common functions and exceptions used by WebSocket opening handshake
processors.
"""
from mod_pywebsocket import common
from mod_pywebsocket import http_header_util
class AbortedByUserException(Exception):
    """Exception for aborting a connection intentionally.

    If this exception is raised in a do_extra_handshake handler, the
    connection will be abandoned. No other WebSocket or HTTP(S) handler
    will be invoked.

    If this exception is raised in a transfer_data handler, the connection
    will be closed without a closing handshake. No other WebSocket or
    HTTP(S) handler will be invoked.
    """
    pass
class HandshakeException(Exception):
    """Raised when WebSocket opening-handshake processing fails.

    Attributes:
        status: optional HTTP status code to respond with, or None.
    """

    def __init__(self, name, status=None):
        Exception.__init__(self, name)
        self.status = status
class VersionException(Exception):
    """Raised when the client's requested WebSocket version is unsupported.

    Attributes:
        supported_versions: str listing the hybi versions the server
            accepts (e.g. '8, 13'); empty string when unspecified.
    """

    def __init__(self, name, supported_versions=''):
        Exception.__init__(self, name)
        self.supported_versions = supported_versions
def get_default_port(is_secure):
    """Return the scheme's default port from mod_pywebsocket.common."""
    return (common.DEFAULT_WEB_SOCKET_SECURE_PORT
            if is_secure else common.DEFAULT_WEB_SOCKET_PORT)
def validate_subprotocol(subprotocol):
    """Validate a Sec-WebSocket-Protocol value (RFC 6455 4.1, 4.2.2, 4.3).

    Raises:
        HandshakeException: if the value is empty or not a single HTTP
            token.
    """
    if not subprotocol:
        raise HandshakeException('Invalid subprotocol name: empty')

    # The value must be exactly one encoded HTTP token: consume a token,
    # then make sure nothing is left over.
    parser = http_header_util.ParsingState(subprotocol)
    http_header_util.consume_token(parser)
    leftover = http_header_util.peek(parser)
    if leftover is not None:
        raise HandshakeException('Invalid non-token string in subprotocol '
                                 'name: %r' % leftover)
def parse_host_header(request):
    """Return (host, port) parsed from the request's Host header.

    When the header carries no explicit port, the default port for the
    connection scheme (ws/wss) is used.

    Raises:
        HandshakeException: if the port is present but not an integer.
    """
    fields = request.headers_in[common.HOST_HEADER].split(':', 1)
    if len(fields) == 1:
        return fields[0], get_default_port(request.is_https())
    try:
        return fields[0], int(fields[1])
    # Bug fix: 'except ValueError, e' is Python-2-only syntax; the 'as'
    # form is valid on Python 2.6+ and Python 3.
    except ValueError as e:
        raise HandshakeException('Invalid port number format: %r' % e)
def format_header(name, value):
    """Serialize one HTTP header line, CRLF-terminated."""
    return '{0}: {1}\r\n'.format(name, value)
def get_mandatory_header(request, key):
    """Return the value of header *key*, raising if it is absent."""
    value = request.headers_in.get(key)
    if value is not None:
        return value
    raise HandshakeException('Header %s is not defined' % key)
def validate_mandatory_header(request, key, expected_value, fail_status=None):
    """Require header *key* to equal *expected_value*, case-insensitively.

    Raises:
        HandshakeException: carrying *fail_status* when the header is
            missing or has a different value.
    """
    actual = get_mandatory_header(request, key)
    if actual.lower() == expected_value.lower():
        return
    raise HandshakeException(
        'Expected %r for header %s but found %r (case-insensitive)' %
        (expected_value, key, actual), status=fail_status)
def check_request_line(request):
    """Reject request lines other than 'GET ... HTTP/1.1' (RFC 6455 5.1)."""
    # 5.1 1. requires the method "GET"; the request must also use
    # HTTP/1.1. Checked in that order, matching the original behavior.
    checks = (('method', 'GET', 'Method is not GET: %r'),
              ('protocol', 'HTTP/1.1', 'Version is not HTTP/1.1: %r'))
    for attribute, required, message in checks:
        actual = getattr(request, attribute)
        if actual != required:
            raise HandshakeException(message % actual)
def parse_token_list(data):
    """Parse a header value following the 1#token rule into a list of
    token strings.

    Leading LWSes must already be trimmed from *data*.

    Raises:
        HandshakeException: on a malformed list or when no token is found.
    """
    cursor = http_header_util.ParsingState(data)
    tokens = []
    while True:
        token = http_header_util.consume_token(cursor)
        if token is not None:
            tokens.append(token)
        http_header_util.consume_lwses(cursor)
        # End of input terminates the list; otherwise a comma separator
        # (possibly followed by more LWSes) must come next.
        if http_header_util.peek(cursor) is None:
            break
        if not http_header_util.consume_string(cursor, ','):
            raise HandshakeException(
                'Expected a comma but found %r' % http_header_util.peek(cursor))
        http_header_util.consume_lwses(cursor)
    if not tokens:
        raise HandshakeException('No valid token found')
    return tokens
# vi:sts=4 sw=4 et
| mpl-2.0 |
DaniilLeksin/theblog | env/lib/python2.7/site-packages/django/conf/locale/de_CH/formats.py | 82 | 1451 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
from __future__ import unicode_literals
# Display formats below use Django's date-template syntax (see the URL in
# the header comment), e.g. 'j. F Y' renders as '25. Oktober 2006'.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
)

# These are the separators for non-monetary numbers. For monetary numbers,
# the DECIMAL_SEPARATOR is a . (decimal point) and the THOUSAND_SEPARATOR is a
# ' (single quote).
# For details, please refer to http://www.bk.admin.ch/dokumentation/sprachen/04915/05016/index.html?lang=de
# (in German) and the documentation.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space (U+00A0)
NUMBER_GROUPING = 3
| gpl-2.0 |
k21/buck | test/com/facebook/buck/testutil/integration/project_workspace.py | 7 | 3058 | # Copyright 2016-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import platform
import shutil
import subprocess
import sys
import tempfile
def run_buck_process(command, cwd=None):
    """Launch the checked-in buck binary with *command*.

    Returns the subprocess.Popen handle; stdout/stderr are piped. The
    child runs with NO_BUCKD set so the daemon is bypassed and the
    intended buck binary is the one exercised.
    """
    root_directory = os.getcwd()
    on_windows = platform.system() == 'Windows'

    # Windows needs the .bat wrapper launched through cmd.exe.
    binary_name = 'buck.bat' if on_windows else 'buck'
    buck_path = os.path.join(root_directory, 'bin', binary_name)
    launcher = ['cmd.exe', '/C'] if on_windows else []
    args = launcher + [buck_path] + list(command)

    # Pass thru our environment, except disabling buckd so that we can be
    # sure the right buck is run.
    child_environment = dict(os.environ)
    child_environment["NO_BUCKD"] = "1"
    return subprocess.Popen(
        args,
        cwd=cwd or root_directory,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=child_environment)
class ProjectWorkspace(object):
    """Context manager providing a throwaway copy of a template project.

    On enter, the template directory is copied into a fresh temp dir and
    '*.fixture' files are renamed to their real names; on exit the temp
    dir is removed. Use run_buck() to invoke buck inside the workspace.
    """

    # Evaluated once at class-definition time.
    is_windows = platform.system() == 'Windows'

    def __init__(self, template_data_directory):
        self._template_data_directory = template_data_directory
        # NOTE: the temp dir is created here but only removed by __exit__,
        # so instances must be used as context managers to avoid leaking it.
        self._temp_dir = tempfile.mkdtemp()
        self.test_data_directory = os.path.join(self._temp_dir, "test")

    def __enter__(self):
        shutil.copytree(self._template_data_directory, self.test_data_directory)
        # Strip the '.fixture' suffix that keeps these files inert in the
        # template tree.
        for root, dirs, files in os.walk(self.test_data_directory):
            for f in files:
                filename, fileext = os.path.splitext(f)
                if fileext == '.fixture':
                    os.rename(os.path.join(root, f), os.path.join(root, filename))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.is_windows:
            # We do this due to bug: http://bugs.python.org/issue22022
            subprocess.call(['rd', '/S', '/Q', self._temp_dir], shell=True)
        else:
            shutil.rmtree(self._temp_dir)

    def resolve_path(self, path):
        """Return *path* resolved relative to the workspace root."""
        return os.path.join(self.test_data_directory, path)

    def run_buck(self, *command):
        """Run buck with *command* in the workspace; return its exit code."""
        proc = run_buck_process(command, self.test_data_directory)
        stdout, stderr = proc.communicate()
        # Copy output through to unittest's output so failures are easy to debug. Can't just
        # provide sys.stdout/sys.stderr to Popen because unittest has replaced the streams with
        # things that aren't directly compatible with Popen.
        sys.stdout.write(stdout)
        sys.stdout.flush()
        sys.stderr.write(stderr)
        sys.stderr.flush()
        return proc.returncode
| apache-2.0 |
kashif/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 25 | 2004 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)

# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)

# NOTE: this is an exercise skeleton -- the TASKs below must be completed
# by the student; in particular `clf` and `y_predicted` are undefined
# until the TASK code is written.

# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens

# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf

# TASK: Fit the pipeline on the training set

# TASK: Predict the outcome on the testing set in a variable named y_predicted

# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))

# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)

#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()

# Predict the result on some short new sentences:
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
dinau/micropython | tests/basics/struct1.py | 8 | 3085 | try:
import ustruct as struct
except:
import struct
print(struct.calcsize("<bI"))
print(struct.unpack("<bI", b"\x80\0\0\x01\0"))
print(struct.calcsize(">bI"))
print(struct.unpack(">bI", b"\x80\0\0\x01\0"))
# 32-bit little-endian specific
#print(struct.unpack("bI", b"\x80\xaa\x55\xaa\0\0\x01\0"))
print(struct.pack("<i", 1))
print(struct.pack(">i", 1))
print(struct.pack("<h", 1))
print(struct.pack(">h", 1))
print(struct.pack("<b", 1))
print(struct.pack(">b", 1))
print(struct.pack("<bI", -128, 256))
print(struct.pack(">bI", -128, 256))
print(struct.calcsize("100sI"))
print(struct.calcsize("97sI"))
print(struct.unpack("<6sH", b"foo\0\0\0\x12\x34"))
print(struct.pack("<6sH", b"foo", 10000))
s = struct.pack("BHBI", 10, 100, 200, 300)
v = struct.unpack("BHBI", s)
print(v == (10, 100, 200, 300))
# check maximum pack on 32-bit machine
print(struct.pack("<I", 2**32 - 1))
print(struct.pack("<I", 0xffffffff))
# long long ints
print(struct.pack("<Q", 2**64 - 1))
print(struct.pack(">Q", 2**64 - 1))
print(struct.pack("<Q", 0xffffffffffffffff))
print(struct.pack(">Q", 0xffffffffffffffff))
print(struct.pack("<q", -1))
print(struct.pack(">q", -1))
print(struct.pack("<Q", 1234567890123456789))
print(struct.pack("<q", -1234567890123456789))
print(struct.pack(">Q", 1234567890123456789))
print(struct.pack(">q", -1234567890123456789))
print(struct.unpack("<Q", b"\x12\x34\x56\x78\x90\x12\x34\x56"))
print(struct.unpack(">Q", b"\x12\x34\x56\x78\x90\x12\x34\x56"))
print(struct.unpack("<q", b"\x12\x34\x56\x78\x90\x12\x34\xf6"))
print(struct.unpack(">q", b"\xf2\x34\x56\x78\x90\x12\x34\x56"))
# check maximum unpack
print(struct.unpack("<I", b"\xff\xff\xff\xff"))
print(struct.unpack("<Q", b"\xff\xff\xff\xff\xff\xff\xff\xff"))
# network byte order
print(struct.pack('!i', 123))
# first arg must be a string
try:
struct.pack(1, 2)
except TypeError:
print('TypeError')
# make sure that unknown types are detected
try:
struct.pack("z", 1)
except:
print("Unknown type")
# Initially repitition counters were supported only for strings,
# but later were implemented for all.
print(struct.unpack("<3B2h", b"foo\x12\x34\xff\xff"))
print(struct.pack("<3B", 1, 2, 3))
# pack_into
buf = bytearray(b'>>>123<<<')
struct.pack_into('<bbb', buf, 3, 0x41, 0x42, 0x43)
print(buf)
struct.pack_into('<bbb', buf, -6, 0x44, 0x45, 0x46)
print(buf)
try:
struct.pack_into('<bbb', buf, 7, 0x41, 0x42, 0x43)
except:
print('struct.error')
try:
struct.pack_into('<bbb', buf, -10, 0x41, 0x42, 0x43)
except:
print('struct.error')
# unpack_from
buf = b'0123456789'
print(struct.unpack_from('<b', buf, 4))
print(struct.unpack_from('<b', buf, -4))
try:
print(struct.unpack_from('<b', buf, 10))
except:
print('struct.error')
try:
print(struct.unpack_from('<b', buf, -11))
except:
print('struct.error')
# pack with too many args, not checked by uPy
#try:
# print(struct.pack('ii', 1, 2, 3))
#except:
# print('struct.error')
# pack with too few args, not checked by uPy
#try:
# print(struct.pack('ii', 1))
#except:
# print('struct.error')
| mit |
johndpope/tensorflow | tensorflow/contrib/bayesflow/__init__.py | 57 | 1871 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for representing Bayesian computation.
## This package provides classes for Bayesian computation with TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.contrib.bayesflow.python.ops import entropy
from tensorflow.contrib.bayesflow.python.ops import monte_carlo
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
# Names kept public on this package; everything else in the module
# namespace is stripped by remove_undocumented below.
_allowed_symbols = ['entropy', 'monte_carlo',
                    'special_math', 'stochastic_gradient_estimators',
                    'stochastic_graph', 'stochastic_tensor',
                    'stochastic_variables', 'variational_inference']
# NOTE(review): 'special_math' is listed but not imported above -- confirm
# it is provided elsewhere or intentionally reserved.
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
tensorflow/privacy | tensorflow_privacy/privacy/estimators/dnn.py | 1 | 2340 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Estimator heads that allow integration with TF Privacy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_privacy.privacy.estimators import head_utils
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import dnn
class DNNClassifier(tf.estimator.Estimator):
  """DP version of `tf.estimator.DNNClassifier`.

  Mirrors the canned DNNClassifier's constructor; differential privacy is
  expected to enter through the supplied `optimizer` -- confirm with
  callers, as this class itself only wires the head and model_fn.
  """

  def __init__(
      self,
      hidden_units,
      feature_columns,
      model_dir=None,
      n_classes=2,
      weight_column=None,
      label_vocabulary=None,
      optimizer=None,
      activation_fn=tf.nn.relu,
      dropout=None,
      config=None,
      warm_start_from=None,
      loss_reduction=tf.keras.losses.Reduction.NONE,
      batch_norm=False,
  ):
    """See `tf.estimator.DNNClassifier`."""
    # Picks a binary or multi-class head based on n_classes; NONE loss
    # reduction leaves per-example losses unreduced.
    head = head_utils.binary_or_multi_class_head(
        n_classes,
        weight_column=weight_column,
        label_vocabulary=label_vocabulary,
        loss_reduction=loss_reduction)
    # Usage-tracking gauge, matching the canned estimator's telemetry.
    estimator._canned_estimator_api_gauge.get_cell('Classifier').set('DNN')

    def _model_fn(features, labels, mode, config):
      # Delegates to the canned DNN model function with the closed-over
      # constructor arguments.
      return dnn.dnn_model_fn_v2(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          hidden_units=hidden_units,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          activation_fn=activation_fn,
          dropout=dropout,
          config=config,
          batch_norm=batch_norm)

    super(DNNClassifier, self).__init__(
        model_fn=_model_fn,
        model_dir=model_dir,
        config=config,
        warm_start_from=warm_start_from)
| apache-2.0 |
someorz/spark | dev/github_jira_sync.py | 52 | 5299 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for updating JIRA's with information about Github pull requests
import json
import os
import re
import sys
import urllib2
try:
import jira.client
except ImportError:
print("This tool requires the jira-python library")
print("Install using 'sudo pip install jira'")
sys.exit(-1)
# User facing configs
# GitHub REST endpoint for the Spark repository; override via the
# environment to point at a fork or a test repository.
GITHUB_API_BASE = os.environ.get("GITHUB_API_BASE", "https://api.github.com/repos/apache/spark")
# JIRA project key that PR titles are scanned for (e.g. "SPARK-12345").
JIRA_PROJECT_NAME = os.environ.get("JIRA_PROJECT_NAME", "SPARK")
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
# Credentials used to post remote links and comments; the real password
# must be supplied through the environment.
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "apachespark")
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "XXX")
# Maximum number of updates to perform in one run
MAX_UPDATES = int(os.environ.get("MAX_UPDATES", "100000"))
# Cut-off for oldest PR on which to comment. Useful for avoiding
# "notification overload" when running for the first time.
MIN_COMMENT_PR = int(os.environ.get("MIN_COMMENT_PR", "1496"))
# File used as an optimization to store the maximum previously seen PR.
# Used mostly because accessing ASF JIRA is slow, so we want to avoid checking
# the state of JIRA's that are tied to PR's we've already looked at.
MAX_FILE = ".github-jira-max"
def get_url(url):
    """Open *url* and return the response object.

    Exits the whole process on an HTTP error, since the script cannot
    make progress without the GitHub API.
    """
    response = None
    try:
        response = urllib2.urlopen(url)
    except urllib2.HTTPError:
        print("Unable to fetch URL, exiting: %s" % url)
        sys.exit(-1)
    return response
def get_json(urllib_response):
    """Read the body of a urllib response and parse it as JSON."""
    body = urllib_response.read()
    return json.loads(body)
# Return a list of (JIRA id, JSON dict) tuples:
# e.g. [('SPARK-1234', {.. json ..}), ('SPARK-5687', {.. json ..})}
def get_jira_prs():
    """Page through the repository's open pull requests and pair each PR
    with every JIRA id found in its title. A PR whose title mentions
    several JIRAs appears once per JIRA id."""
    result = []
    has_next_page = True
    page_num = 0
    while has_next_page:
        page = get_url(GITHUB_API_BASE + "/pulls?page=%s&per_page=100" % page_num)
        page_json = get_json(page)

        for pull in page_json:
            # Match ids like "SPARK-1234" / "SPARK-12345" in the PR title.
            jiras = re.findall(JIRA_PROJECT_NAME + "-[0-9]{4,5}", pull['title'])
            for jira in jiras:
                result = result + [(jira, pull)]

        # Check if there is another page
        # NOTE: Python 2 only — filter() returns a list here, and
        # page.info().headers is the httplib header-line list.
        link_header = filter(lambda k: k.startswith("Link"), page.info().headers)[0]
        if "next" not in link_header:
            has_next_page = False
        else:
            page_num += 1
    return result
def set_max_pr(max_val):
    """Persist the largest PR number seen so far to MAX_FILE.

    Subsequent runs read this back (see get_max_pr) to skip PRs that were
    already processed.
    """
    # Use a context manager so the file is closed even if the write fails
    # (the original only closed on the success path).
    with open(MAX_FILE, 'w') as f:
        f.write("%s" % max_val)
    print("Writing largest PR number seen: %s" % max_val)
def get_max_pr():
    """Return the largest PR number recorded in MAX_FILE, or 0 if the
    file does not exist (e.g. on the first run)."""
    if os.path.exists(MAX_FILE):
        # Read via a context manager so the handle is closed promptly;
        # the original `int(open(...).read())` leaked the file object.
        with open(MAX_FILE, 'r') as f:
            result = int(f.read())
        print("Read largest PR number previously seen: %s" % result)
        return result
    else:
        return 0
# Main script body: link each new PR to its JIRA issue and optionally comment.
jira_client = jira.client.JIRA({'server': JIRA_API_BASE},
                               basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))

jira_prs = get_jira_prs()

# Only look at PRs newer than the highest PR number handled previously.
previous_max = get_max_pr()
print("Retrieved %s JIRA PR's from Github" % len(jira_prs))
jira_prs = [(k, v) for k, v in jira_prs if int(v['number']) > previous_max]
print("%s PR's remain after excluding visted ones" % len(jira_prs))

num_updates = 0
considered = []
# Process PRs in ascending PR-number order so MAX_FILE advances monotonically.
for issue, pr in sorted(jira_prs, key=lambda kv: int(kv[1]['number'])):
    if num_updates >= MAX_UPDATES:
        break
    pr_num = int(pr['number'])

    print("Checking issue %s" % issue)
    considered = considered + [pr_num]

    url = pr['html_url']
    title = "[Github] Pull Request #%s (%s)" % (pr['number'], pr['user']['login'])
    try:
        existing_links = map(lambda l: l.raw['object']['url'], jira_client.remote_links(issue))
    except:
        # Deliberately broad: a malformed or missing JIRA should not stop
        # the whole sync run — log it and move on to the next issue.
        print("Failure reading JIRA %s (does it exist?)" % issue)
        print(sys.exc_info()[0])
        continue

    # Skip issues that already link to this PR.
    if url in existing_links:
        continue

    icon = {"title": "Pull request #%s" % pr['number'],
            "url16x16": "https://assets-cdn.github.com/favicon.ico"}
    destination = {"title": title, "url": url, "icon": icon}
    # For all possible fields see:
    # https://developer.atlassian.com/display/JIRADEV/Fields+in+Remote+Issue+Links
    # application = {"name": "Github pull requests", "type": "org.apache.spark.jira.github"}
    jira_client.add_remote_link(issue, destination)

    comment = "User '%s' has created a pull request for this issue:" % pr['user']['login']
    comment += "\n%s" % pr['html_url']
    # Only comment on sufficiently recent PRs to avoid notification floods.
    if pr_num >= MIN_COMMENT_PR:
        jira_client.add_comment(issue, comment)

    print("Added link %s <-> PR #%s" % (issue, pr['number']))
    num_updates += 1

# Record the highest PR number we looked at, even if it needed no update.
if len(considered) > 0:
    set_max_pr(max(considered))
| apache-2.0 |
kevinthesun/mxnet | python/mxnet/gluon/loss.py | 2 | 15374 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
""" losses for training neural networks """
from __future__ import absolute_import
from .. import ndarray
from ..contrib import symbol as symbol_contrib
from ..contrib import ndarray as ndarray_contrib
from ..base import numeric_types
from .block import HybridBlock
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
def _reshape_label_as_output(F, output, label):
    """Reshape *label* so it matches *output*.

    For ndarray inputs the concrete shape of *output* is known, so the
    label is reshaped to it directly.  For symbolic inputs the shape is
    not available, so we reshape to the empty shape and let it be
    inferred from output's shape via the '-' operator later.
    """
    if F is ndarray:
        return label.reshape(output.shape)
    return label.reshape(())
class Loss(HybridBlock):
    """Base class for loss.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight, batch_axis, **kwargs):
        super(Loss, self).__init__(**kwargs)
        self._weight = weight
        self._batch_axis = batch_axis

    def __repr__(self):
        return '{name}(batch_axis={batch_axis}, w={weight})'.format(
            name=self.__class__.__name__,
            batch_axis=self._batch_axis,
            weight=self._weight)

    def hybrid_forward(self, F, x, *args, **kwargs):
        """Overrides to construct symbolic graph for this `Block`.

        Parameters
        ----------
        x : Symbol or NDArray
            The first input tensor.
        *args : list of Symbol or list of NDArray
            Additional input tensors.
        """
        # pylint: disable= invalid-name
        # Subclasses supply the actual loss computation.
        raise NotImplementedError
class L2Loss(Loss):
    """Calculates the mean squared error between output and label:

    .. math::
        L = \\frac{1}{2}\\sum_i \\Vert {output}_i - {label}_i \\Vert^2.

    Output and label can have arbitrary shape as long as they have the same
    number of elements.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight=1., batch_axis=0, **kwargs):
        super(L2Loss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        squared_error = F.square(output - label)
        # The conventional 1/2 factor of the L2 loss is folded into the
        # weight passed to _apply_weighting.
        weighted = _apply_weighting(F, squared_error, self._weight/2, sample_weight)
        return F.mean(weighted, axis=self._batch_axis, exclude=True)
class L1Loss(Loss):
    """Calculates the mean absolute error between output and label:

    .. math::
        L = \\sum_i \\vert {output}_i - {label}_i \\vert.

    Output and label must have the same shape.

    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight=None, batch_axis=0, **kwargs):
        super(L1Loss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        # Absolute error — unlike L2Loss there is no 1/2 factor here.
        loss = F.abs(output - label)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return F.mean(loss, axis=self._batch_axis, exclude=True)
class SigmoidBinaryCrossEntropyLoss(Loss):
    r"""The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)

    BCE loss is useful when training logistic regression.

    .. math::
        loss(o, t) = - 1/n \sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))


    Parameters
    ----------
    from_sigmoid : bool, default is `False`
        Whether the input is the output of a sigmoid. Setting this to `False`
        makes the loss compute the sigmoid and the BCE together, which is more
        numerically stable through the log-sum-exp trick.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
        super(SigmoidBinaryCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_sigmoid = from_sigmoid

    def hybrid_forward(self, F, output, label, sample_weight=None):
        label = _reshape_label_as_output(F, output, label)
        if not self._from_sigmoid:
            # Numerically stable fused sigmoid + BCE via the
            # log-sum-exp trick (avoids overflow in exp for large |output|).
            max_val = F.maximum(-output, 0)
            loss = output - output*label + max_val + F.log(F.exp(-max_val)+F.exp(-output-max_val))
        else:
            # Inputs are already probabilities; the 1e-12 epsilon guards
            # against log(0).
            loss = -(F.log(output+1e-12)*label + F.log(1.-output+1e-12)*(1.-label))
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return F.mean(loss, axis=self._batch_axis, exclude=True)


# Convenience alias.
SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
class SoftmaxCrossEntropyLoss(Loss):
    """Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)

    If `sparse_label` is `True`, label should contain integer category indicators:

    .. math::
        p = {softmax}({output})

        L = -\\sum_i {log}(p_{i,{label}_i})

    Label's shape should be output's shape without the `axis` dimension. i.e. for
    `output.shape` = (1,2,3,4) and axis = 2, `label.shape` should be (1,2,4).

    If `sparse_label` is `False`, label should contain probability distribution
    with the same shape as output:

    .. math::
        p = {softmax}({output})

        L = -\\sum_i \\sum_j {label}_j {log}(p_{ij})

    Parameters
    ----------
    axis : int, default -1
        The axis to sum over when computing softmax and entropy.
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    from_logits : bool, default False
        Whether input is a log probability (usually from log_softmax) instead
        of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
                 batch_axis=0, **kwargs):
        super(SoftmaxCrossEntropyLoss, self).__init__(weight, batch_axis, **kwargs)
        self._axis = axis
        self._sparse_label = sparse_label
        self._from_logits = from_logits

    def hybrid_forward(self, F, output, label, sample_weight=None):
        # Normalize to log-probabilities unless the caller already did.
        log_prob = output if self._from_logits else F.log_softmax(output)
        if self._sparse_label:
            # Integer labels: pick the log-probability of the target class.
            ce = -F.pick(log_prob, label, axis=self._axis, keepdims=True)
        else:
            # Dense labels: full cross entropy against the distribution.
            ce = -F.sum(log_prob * label, axis=self._axis, keepdims=True)
        ce = _apply_weighting(F, ce, self._weight, sample_weight)
        return F.mean(ce, axis=self._batch_axis, exclude=True)


# Convenience alias.
SoftmaxCELoss = SoftmaxCrossEntropyLoss
class KLDivLoss(Loss):
    """The Kullback-Leibler divergence loss.

    KL divergence is a useful distance measure for continuous distributions
    and is often useful when performing direct regression over the space of
    (discretely sampled) continuous output distributions.

    .. _Kullback-Leibler divergence:
        https://en.wikipedia.org/wiki/Kullback-Leibler_divergence
    .. math::
        L = 1/n \\sum_i (label_i * (log(label_i) - output_i))

    Label's shape should be the same as output's.

    Parameters
    ----------
    from_logits : bool, default is `True`
        Whether the input is log probability (usually from log_softmax) instead
        of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, from_logits=True, weight=None, batch_axis=0, **kwargs):
        super(KLDivLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_logits = from_logits

    def hybrid_forward(self, F, output, label, sample_weight=None):
        # Convert raw scores to log-probabilities if necessary.
        log_pred = output if self._from_logits else F.log_softmax(output)
        # The 1e-12 epsilon guards against log(0) for zero-probability labels.
        divergence = label * (F.log(label+1e-12) - log_pred)
        divergence = _apply_weighting(F, divergence, self._weight, sample_weight)
        return F.mean(divergence, axis=self._batch_axis, exclude=True)
class CTCLoss(Loss):
    r"""Connectionist Temporal Classification Loss.

    See `"Connectionist Temporal Classification: Labelling Unsegmented
    Sequence Data with Recurrent Neural Networks"
    <http://www.cs.toronto.edu/~graves/icml_2006.pdf>`_ paper for more information.

    Parameters
    ----------
    layout : str, default 'NTC'
        Layout of the output sequence activation vector.
    label_layout : str, default 'NT'
        Layout of the labels.
    padding_mask : int or None, default -1
        This is the label value to be considered padding, which is used to derive the actual
        lengths of labels. Only required when `label_lengths` is None.
    weight : float or None
        Global scalar weight for loss.
    sample_weight : Symbol or None
        Per sample weighting. Must be broadcastable to
        the same shape as loss. For example, if loss has
        shape (64, 10) and you want to weight each sample
        in the batch, `sample_weight` should have shape (64, 1).
        This should be used as the fifth argument when calling this loss.

    Input shapes:
        `data` is an activation tensor without softmax.
        Its shape depends on `layout`. For `layout='TNC'`, this
        input has shape `(sequence_length, batch_size, alphabet_size)`

        `label` is the label index matrix.
        Its shape depends on `label_layout`. For `label_layout='TN'`, this
        input has shape `(label_sequence_length, batch_size)`
        When `label_lengths` is not specified, the first occurrence of `padding_mask`
        in each sample marks the end of the label sequence of that sample.
        For example, suppose there are two samples, with *label_sequence_length* = 4.
        The two sequences of labels are [2, 1] and [3, 2, 2], and their actual lengths
        are smaller than 4. Thus, given *padding_mask* = 0, the resulting ```label```
        tensor should be padded to be::

            [[2, 1, 0, 0], [3, 2, 2, 0]]

        `data_lengths` is optional and defaults to None.
        When specified, it represents the actual lengths of data.
        The shape should be (batch_size,).
        If None, the data lengths are treated as being equal to the max sequence length.
        This should be used as the third argument when calling this loss.

        `label_lengths` is optional and defaults to None.
        When specified, it represents the actual lengths of labels.
        The shape should be (batch_size,).
        If None, the label lengths are derived from the first occurrence of
        the value specified by `padding_mask`.
        This should be used as the fourth argument when calling this loss.

    Output shape:
        The CTC loss output has the shape (batch_size,).
    """
    def __init__(self, layout='NTC', label_layout='NT', padding_mask=-1,
                 weight=None, **kwargs):
        assert layout in ['NTC', 'TNC'],\
            "Only 'NTC' and 'TNC' layouts for output are supported. Got: %s"%layout
        assert label_layout in ['NT', 'TN'],\
            "Only 'NT' and 'TN' layouts for label are supported. Got: %s"%label_layout
        self._layout = layout
        self._label_layout = label_layout
        self._padding_mask = padding_mask
        # The batch axis is derived from the position of 'N' in the label
        # layout (0 for 'NT', 1 for 'TN').
        batch_axis = label_layout.find('N')
        super(CTCLoss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, data, label,
                       data_lengths=None, label_lengths=None, sample_weight=None):
        # Bring data into time-major ('TNC') order before calling the
        # contrib operator.
        if self._layout == 'NTC':
            data = F.swapaxes(data, 0, 1)
        # Likewise make the label batch-major when it was given as 'TN'.
        if self._batch_axis == 1:
            label = F.swapaxes(label, 0, 1)
        # Select the implementation matching F (imperative vs symbolic).
        if F is ndarray:
            F_contrib = ndarray_contrib
        else:
            F_contrib = symbol_contrib
        loss = F_contrib.CTCLoss(data, label,
                                 use_data_lengths=data_lengths is not None,
                                 use_label_lengths=label_lengths is not None,
                                 data_lengths=data_lengths, label_lengths=label_lengths,
                                 padding_mask=self._padding_mask)
        return _apply_weighting(F, loss, self._weight, sample_weight)
| apache-2.0 |
xushiwei/nova | nova/console/vmrc.py | 4 | 4521 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VMRC console drivers."""
import base64
import json
from nova import exception
from nova import flags
from nova import log as logging
from nova.virt.vmwareapi import vim_util
FLAGS = flags.FLAGS
# Port used for VMware VMRC console connections.
flags.DEFINE_integer('console_vmrc_port',
                     443,
                     "port for VMware VMRC connections")
# How many times to retry retrieving VMRC information before giving up.
flags.DEFINE_integer('console_vmrc_error_retries',
                     10,
                     "number of retries for retrieving VMRC information")
class VMRCConsole(object):
    """VMRC console driver with ESX credentials."""

    def __init__(self):
        super(VMRCConsole, self).__init__()

    @property
    def console_type(self):
        return 'vmrc+credentials'

    def get_port(self, context):
        """Get available port for consoles."""
        return FLAGS.console_vmrc_port

    def setup_console(self, context, console):
        """Sets up console."""
        pass

    def teardown_console(self, context, console):
        """Tears down console."""
        pass

    def init_host(self):
        """Perform console initialization."""
        pass

    def fix_pool_password(self, password):
        """Encode password."""
        # TODO(sateesh): Encrypt pool password
        return password

    def generate_password(self, vim_session, pool, instance_name):
        """Returns VMRC Connection credentials.

        Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
        """
        username, password = pool['username'], pool['password']
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine',
                                       ['name', 'config.files.vmPathName'])
        vm_ref = None
        vm_ds_path_name = None
        for vm in vms:
            # Flatten the returned property set into a name -> value map.
            props = {}
            for prop in vm.propSet:
                props[prop.name] = prop.val
            if props.get('name') == instance_name:
                vm_ref = vm.obj
                vm_ds_path_name = props.get('config.files.vmPathName')
                break
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        json_data = json.dumps({'vm_id': vm_ds_path_name,
                                'username': username,
                                'password': password})
        return base64.b64encode(json_data)

    def is_otp(self):
        """Is one time password or not."""
        return False
class VMRCSessionConsole(VMRCConsole):
    """VMRC console driver with VMRC One Time Sessions."""

    def __init__(self):
        super(VMRCSessionConsole, self).__init__()

    @property
    def console_type(self):
        return 'vmrc+session'

    def generate_password(self, vim_session, pool, instance_name):
        """Returns a VMRC Session.

        Return string is of the form '<VM MOID>:<VMRC Ticket>'.
        """
        vms = vim_session._call_method(vim_util, 'get_objects',
                                       'VirtualMachine', ['name'])
        vm_ref = None
        for vm in vms:
            if vm.propSet[0].val == instance_name:
                vm_ref = vm.obj
                # Stop at the first match, consistent with
                # VMRCConsole.generate_password (previously the loop kept
                # scanning and effectively used the *last* match).
                break
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance_name)
        # Acquire a one-time clone ticket from the VIM session manager;
        # it serves as both username and password for the console.
        virtual_machine_ticket = \
            vim_session._call_method(
                vim_session._get_vim(),
                'AcquireCloneTicket',
                vim_session._get_vim().get_service_content().sessionManager)
        json_data = json.dumps({'vm_id': str(vm_ref.value),
                                'username': virtual_machine_ticket,
                                'password': virtual_machine_ticket})
        return base64.b64encode(json_data)

    def is_otp(self):
        """Is one time password or not."""
        return True
| apache-2.0 |
mrquim/mrquimrepo | repo/script.module.youtube.dl/lib/youtube_dl/extractor/cultureunplugged.py | 64 | 2508 | from __future__ import unicode_literals
import re
import time
from .common import InfoExtractor
from ..utils import (
int_or_none,
HEADRequest,
)
class CultureUnpluggedIE(InfoExtractor):
    # URL pattern: numeric video id, optionally followed by a title slug.
    _VALID_URL = r'https?://(?:www\.)?cultureunplugged\.com/documentary/watch-online/play/(?P<id>\d+)(?:/(?P<display_id>[^/]+))?'
    _TESTS = [{
        'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662/The-Next--Best-West',
        'md5': 'ac6c093b089f7d05e79934dcb3d228fc',
        'info_dict': {
            'id': '53662',
            'display_id': 'The-Next--Best-West',
            'ext': 'mp4',
            'title': 'The Next, Best West',
            'description': 'md5:0423cd00833dea1519cf014e9d0903b1',
            'thumbnail': r're:^https?://.*\.jpg$',
            'creator': 'Coldstream Creative',
            'duration': 2203,
            'view_count': int,
        }
    }, {
        'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Fall back to the numeric id when the URL carries no title slug.
        display_id = mobj.group('display_id') or video_id

        # request setClientTimezone.php to get PHPSESSID cookie which is need to get valid json data in the next request
        self._request_webpage(HEADRequest(
            'http://www.cultureunplugged.com/setClientTimezone.php?timeOffset=%d' % -(time.timezone / 3600)), display_id)
        movie_data = self._download_json(
            'http://www.cultureunplugged.com/movie-data/cu-%s.json' % video_id, display_id)

        # 'url' and 'title' are mandatory; everything below is optional.
        video_url = movie_data['url']
        title = movie_data['title']

        description = movie_data.get('synopsis')
        creator = movie_data.get('producer')
        duration = int_or_none(movie_data.get('duration'))
        view_count = int_or_none(movie_data.get('views'))

        # Collect whichever of the 'small'/'large' thumbnails are present;
        # the enumerate index doubles as the preference value.
        thumbnails = [{
            'url': movie_data['%s_thumb' % size],
            'id': size,
            'preference': preference,
        } for preference, size in enumerate((
            'small', 'large')) if movie_data.get('%s_thumb' % size)]

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'description': description,
            'creator': creator,
            'duration': duration,
            'view_count': view_count,
            'thumbnails': thumbnails,
        }
| gpl-2.0 |
pbaesse/Sissens | lib/python2.7/site-packages/pip/commands/install.py | 323 | 17412 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir, get_installed_version
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
# Module-level logger, namespaced to this module.
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
    """
    Install packages from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    name = 'install'

    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""

    summary = 'Install packages.'

    def __init__(self, *args, **kw):
        """Register all command-line options for `pip install`."""
        super(InstallCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # Options shared with other commands (requirements, build dir, ...).
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.build_dir())

        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
                 'By default this will not replace existing files/folders in '
                 '<dir>. Use --upgrade to replace existing packages in <dir> '
                 'with new versions.'
        )

        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )

        cmd_opts.add_option(cmdoptions.src())

        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
                 'version. The handling of dependencies depends on the '
                 'upgrade-strategy used.'
        )

        cmd_opts.add_option(
            '--upgrade-strategy',
            dest='upgrade_strategy',
            default='eager',
            choices=['only-if-needed', 'eager'],
            help='Determines how dependency upgrading should be handled. '
                 '"eager" - dependencies are upgraded regardless of '
                 'whether the currently installed version satisfies the '
                 'requirements of the upgraded package(s). '
                 '"only-if-needed" -  are upgraded only when they do not '
                 'satisfy the requirements of the upgraded package(s).'
        )

        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')

        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')

        cmd_opts.add_option(cmdoptions.ignore_requires_python())
        cmd_opts.add_option(cmdoptions.no_deps())

        cmd_opts.add_option(cmdoptions.install_options())
        cmd_opts.add_option(cmdoptions.global_options())

        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
                 "platform. Typically ~/.local/, or %APPDATA%\Python on "
                 "Windows. (See the Python documentation for site.USER_BASE "
                 "for full details.)")

        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")

        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")

        cmd_opts.add_option(
            '--prefix',
            dest='prefix_path',
            metavar='dir',
            default=None,
            help="Installation prefix where lib, bin and other top-level "
                 "folders are placed")

        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )

        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )

        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())
        cmd_opts.add_option(cmdoptions.pre())
        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())

        # Package-index options go into their own option group, placed
        # before the command options in --help output.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        """Resolve, download, build and install the requested packages."""
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        # Emit deprecation warnings for options scheduled for removal.
        if options.as_egg:
            warnings.warn(
                "--egg has been deprecated and will be removed in the future. "
                "This flag is mutually exclusive with large parts of pip, and "
                "actually using it invalidates pip's ability to manage the "
                "installation process.",
                RemovedInPip10Warning,
            )

        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.download_dir:
            warnings.warn(
                "pip install --download has been deprecated and will be "
                "removed in the future. Pip now has a download command that "
                "should be used instead.",
                RemovedInPip10Warning,
            )
            options.ignore_installed = True

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')

        # --target installs into a temp dir first; files are moved into the
        # real target directory afterwards (see the end of this method).
        temp_target_dir = None
        if options.target_dir:
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)

        global_options = options.global_options or []

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)

            # Disable wheel caching when the cache dir is not owned by the
            # current user (common when running under sudo without -H).
            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing pip with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None

            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    upgrade_strategy=options.upgrade_strategy,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                    require_hashes=options.require_hashes,
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                if not requirement_set.has_requirements:
                    return

                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)

                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                            prefix=options.prefix_path,
                        )

                        # Build the "Successfully installed ..." summary,
                        # appending the installed version when it can be
                        # determined.
                        possible_lib_locations = get_lib_location_guesses(
                            user=options.use_user_site,
                            home=temp_target_dir,
                            root=options.root_path,
                            prefix=options.prefix_path,
                            isolated=options.isolated_mode,
                        )
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                installed_version = get_installed_version(
                                    req.name, possible_lib_locations
                                )
                                if installed_version:
                                    item += '-' + installed_version
                            except Exception:
                                # Version lookup is best-effort; fall back
                                # to the bare project name.
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    # Keep the pre-existing build directory for inspection.
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()

        if options.target_dir:
            ensure_dir(options.target_dir)

            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            lib_dir_list = []

            purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']

            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)

            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    target_item_dir = os.path.join(options.target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not options.upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. Pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)

                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )

            # The temporary staging directory is no longer needed.
            shutil.rmtree(temp_target_dir)
        return requirement_set
def get_lib_location_guesses(*args, **kwargs):
    """Return the candidate install locations (purelib, platlib) for a
    distutils install scheme built from *args*/**kwargs*."""
    layout = distutils_scheme('', *args, **kwargs)
    return [layout['purelib'], layout['platlib']]
| gpl-3.0 |
josircg/raizcidadanista | raizcidadanista/filebrowser/widgets.py | 19 | 3611 | # coding: utf-8
# DJANGO IMPORTS
from django.template.loader import render_to_string
from django.forms.widgets import ClearableFileInput as DjangoClearableFileInput
from django.forms.widgets import CheckboxInput
from django.utils.translation import ugettext_lazy
from django.utils.safestring import mark_safe
# FILEBROWSER IMPORTS
from filebrowser.base import FileObject
from filebrowser.settings import ADMIN_THUMBNAIL
class FileInput(DjangoClearableFileInput):
    """File input widget that renders a thumbnail preview next to the field
    (no clear checkbox)."""
    initial_text = ugettext_lazy('Currently')
    input_text = ugettext_lazy('Change')
    clear_checkbox_label = ugettext_lazy('Clear')

    template_with_initial = u'%(input)s %(preview)s'

    def render(self, name, value, attrs=None):
        # NOTE: super(DjangoClearableFileInput, ...) deliberately skips
        # Django's clearable markup and renders the plain file input.
        subs = {
            'initial_text': self.initial_text,
            'input_text': self.input_text,
            'clear_template': '',
            'preview': '',
            'clear_checkbox_label': self.clear_checkbox_label,
        }
        subs['input'] = super(DjangoClearableFileInput, self).render(name, value, attrs)
        tpl = u'%(input)s'
        if value and hasattr(value, "url"):
            tpl = self.template_with_initial
            subs["preview"] = render_to_string(
                'filebrowser/widgets/fileinput.html', {
                    'value': FileObject(value.name),
                    'ADMIN_THUMBNAIL': ADMIN_THUMBNAIL,
                })
        return mark_safe(tpl % subs)
class ClearableFileInput(DjangoClearableFileInput):
    """
    A FileField Widget that shows its current value if it has one.
    If value is an Image, a thumbnail is shown.
    """
    initial_text = ugettext_lazy('Currently')
    input_text = ugettext_lazy('Change')
    clear_checkbox_label = ugettext_lazy('Clear')

    template_with_initial = u'<p class="file-upload">%(initial_text)s: %(initial)s<span class="clearable-file-input">%(clear_template)s</span><br />%(input_text)s: %(input)s %(preview)s</p>'
    template_with_clear = u'%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'

    def render(self, name, value, attrs=None):
        subs = {
            'initial_text': self.initial_text,
            'input_text': self.input_text,
            'clear_template': '',
            'preview': '',
            'clear_checkbox_label': self.clear_checkbox_label,
        }
        # Render the bare <input>, bypassing Django's clearable template.
        subs['input'] = super(DjangoClearableFileInput, self).render(name, value, attrs)
        template = u'%(input)s'
        if value and hasattr(value, "url"):
            template = self.template_with_initial
            subs['initial'] = u'<a target="_blank" href="%s">%s</a>' % (value.url, value)
            if not self.is_required:
                cb_name = self.clear_checkbox_name(name)
                cb_id = self.clear_checkbox_id(cb_name)
                subs['clear_checkbox_name'] = cb_name
                subs['clear_checkbox_id'] = cb_id
                subs['clear'] = CheckboxInput().render(cb_name, False, attrs={'id': cb_id})
                subs['clear_template'] = self.template_with_clear % subs
            subs["preview"] = render_to_string(
                'filebrowser/widgets/clearablefileinput.html', {
                    'value': FileObject(value.name),
                    'ADMIN_THUMBNAIL': ADMIN_THUMBNAIL,
                })
        return mark_safe(template % subs)
| gpl-3.0 |
leedm777/sensu-community-plugins | plugins/openstack/neutron/check_neutron-api.py | 49 | 2449 | #!/usr/bin/env python
#
# Check OpenStack Neutron API Status
# ===
#
# Dependencies
# -----------
# - python-neutronclient and related libraries
#
# Performs API query to determine 'alive' status of the
# Neutron API.
#
# Author: Mike Dorman <mdorman@godaddy.com>
# Significantly based on neutron-agent-status.py by
# Brian Clark <brian.clark@cloudapt.com>
#
# Released under the same terms as Sensu (the MIT license);
# see LICENSE for details.
#
# #RED
import sys
import argparse
import logging
from neutronclient.neutron import client
# Standard Nagios/Sensu exit codes.
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3

logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.DEBUG)

# Command-line options: Keystone credentials plus optional region/endpoint
# overrides for reaching the Neutron API.
parser = argparse.ArgumentParser(description='Check OpenStack Neutron API status')
parser.add_argument('--auth-url', metavar='URL', type=str,
                    required=True,
                    help='Keystone URL')
parser.add_argument('--username', metavar='username', type=str,
                    required=True,
                    help='username for authentication')
parser.add_argument('--password', metavar='password', type=str,
                    required=True,
                    help='password for authentication')
parser.add_argument('--tenant', metavar='tenant', type=str,
                    required=True,
                    help='tenant name for authentication')
parser.add_argument('--region_name', metavar='region', type=str,
                    help='Region to select for authentication')
# NOTE(review): metavar 'bybass' looks like a typo for 'bypass' (cosmetic only,
# left untouched since it is runtime-visible help text).
parser.add_argument('--bypass', metavar='bybass', type=str,
                    required=False,
                    help='bypass the service catalog and use this URL for Nova API')
args = parser.parse_args()
try:
c = client.Client('2.0',
username=args.username,
tenant_name=args.tenant,
password=args.password,
auth_url=args.auth_url,
region_name=args.region_name,
insecure=True,
endpoint_url=args.bypass)
networks = c.list_networks()
except Exception as e:
print str(e)
sys.exit(STATE_CRITICAL)
if len(networks) < 1:
exit_state = STATE_WARNING
state_string = "WARNING"
else:
exit_state = STATE_OK
state_string = "OK"
print "Neutron API status: {state_str}, {networks} network(s) found.".format(state_str=state_string, networks=len(networks))
sys.exit(exit_state)
| mit |
aaltay/beam | learning/katas/python/Common Transforms/Filter/ParDo/task.py | 6 | 1130 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
from log_elements import LogElements
class FilterOutEvenNumber(beam.DoFn):
    """DoFn that drops even elements, passing only odd integers through."""

    def process(self, element):
        # element % 2 == 1 holds exactly for odd ints (incl. negatives in Python).
        is_odd = element % 2 == 1
        if is_odd:
            yield element
# Build and run the pipeline: 1..10 -> keep odds -> log.
with beam.Pipeline() as pipeline:
    numbers = pipeline | beam.Create(range(1, 11))
    odds = numbers | beam.ParDo(FilterOutEvenNumber())
    odds | LogElements()
| apache-2.0 |
duncan-brown/pycbc | pycbc/frame/losc.py | 5 | 4995 | # Copyright (C) 2017 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Generals
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules contains functions for getting data from the LOSC
"""
from astropy.utils.data import download_file
_losc_url = "https://losc.ligo.org/archive/links/%s/%s/%s/%s/json/"
def get_run(time):
    """Return the name of the public LOSC run containing *time* (GPS s).

    Raises ValueError if the time falls outside every known run.
    """
    runs = [
        (1164556817, 1187733618, 'O2_16KHZ_R1'),
        (1126051217, 1137254417, 'O1'),
        (815011213, 875318414, 'S5'),
        (930787215, 971568015, 'S6'),
    ]
    for lo, hi, name in runs:
        if lo <= time <= hi:
            return name
    raise ValueError('Time %s not available in a public dataset' % time)
def _get_channel(time):
if 1164556817 <= time <= 1187733618:
return 'GWOSC-16KHZ_R1_STRAIN'
else:
return 'LOSC-STRAIN'
def losc_frame_json(ifo, start_time, end_time):
    """Fetch the LOSC archive's JSON description of public data files.

    Parameters
    ----------
    ifo: str
        The name of the IFO to find the information about.
    start_time: int
        The gps time in GPS seconds
    end_time: int
        The end time in GPS seconds

    Returns
    -------
    info: dict
        Decoded JSON describing the files that span the requested times.
    """
    import json
    try:
        from urllib.request import urlopen
    except ImportError:  # python < 3
        from urllib import urlopen

    run_start = get_run(start_time)
    run_end = get_run(end_time)
    if run_start != run_end:
        raise ValueError('Spanning multiple runs is not currently supported.'
                         'You have requested data that uses '
                         'both %s and %s' % (run_start, run_end))

    url = _losc_url % (run_start, ifo, int(start_time), int(end_time))
    try:
        return json.loads(urlopen(url).read().decode())
    except Exception as e:
        print(e)
        raise ValueError('Failed to find gwf files for '
            'ifo=%s, run=%s, between %s-%s' % (ifo, run_start,
                                               start_time, end_time))
def losc_frame_urls(ifo, start_time, end_time):
    """Return the urls of LOSC gwf frame files spanning the given times.

    Parameters
    ----------
    ifo: str
        The name of the IFO to find the information about.
    start_time: int
        The gps time in GPS seconds
    end_time: int
        The end time in GPS seconds

    Returns
    -------
    frame_files: list
        URLs (str) of the gwf-format files covering the requested span.
    """
    entries = losc_frame_json(ifo, start_time, end_time)['strain']
    return [entry['url'] for entry in entries if entry['format'] == 'gwf']
def read_frame_losc(channels, start_time, end_time):
    """Read one or more channels from public LOSC frame files.

    Parameters
    ----------
    channels: str or list
        The channel name to read or list of channel names.
    start_time: int
        The gps time in GPS seconds
    end_time: int
        The end time in GPS seconds

    Returns
    -------
    ts: TimeSeries
        A single timeseries for a single channel, else a list of them.
    """
    from pycbc.frame import read_frame

    channel_list = channels if isinstance(channels, list) else [channels]
    # The IFO prefix ('H1', 'L1', ...) is the first two characters.
    ifos = [c[0:2] for c in channel_list]

    urls = {}
    for ifo in ifos:
        urls[ifo] = losc_frame_urls(ifo, start_time, end_time)
        if len(urls[ifo]) == 0:
            raise ValueError("No data found for %s so we "
                             "can't produce a time series" % ifo)

    # Download (with caching) every frame file needed per IFO.
    fnames = {ifo: [] for ifo in ifos}
    for ifo in ifos:
        for url in urls[ifo]:
            fnames[ifo].append(download_file(url, cache=True))

    series = [read_frame(fnames[channel[0:2]], channel,
                         start_time=start_time, end_time=end_time)
              for channel in channel_list]

    return series[0] if len(series) == 1 else series
def read_strain_losc(ifo, start_time, end_time):
    """Read the public LOSC strain channel for one detector.

    Parameters
    ----------
    ifo: str
        The name of the IFO to read data for. Ex. 'H1', 'L1', 'V1'
    start_time: int
        The gps time in GPS seconds
    end_time: int
        The end time in GPS seconds

    Returns
    -------
    ts: TimeSeries
        Returns a timeseries with the strain data.
    """
    chan = _get_channel(start_time)
    return read_frame_losc('%s:%s' % (ifo, chan), start_time, end_time)
| gpl-3.0 |
koparasy/gemfi | util/streamline/m5stats2streamline.py | 6 | 42738 | #!/usr/bin/env python
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dam Sunwoo
#
# This script converts gem5 output to ARM DS-5 Streamline .apc project file
# (Requires the gem5 runs to be run with ContextSwitchStatsDump enabled and
# some patches applied to target Linux kernel.)
# Visit http://www.gem5.org/Streamline for more details.
#
# Usage:
# m5stats2streamline.py <stat_config.ini> <gem5 run folder> <dest .apc folder>
#
# <stat_config.ini>: .ini file that describes which stats to be included
# in conversion. Sample .ini files can be found in
# util/streamline.
# NOTE: this is NOT the gem5 config.ini file.
#
# <gem5 run folder>: Path to gem5 run folder (must contain config.ini,
# stats.txt[.gz], and system.tasks.txt.)
#
# <dest .apc folder>: Destination .apc folder path
#
# APC project generation based on Gator v17 (DS-5 v5.17)
# Subsequent versions should be backward compatible
import re, sys, os
from ConfigParser import ConfigParser
import gzip
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import shutil
import zlib
import argparse
# Command-line interface; mirrors the usage description in the file header.
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description="""
    Converts gem5 runs to ARM DS-5 Streamline .apc project file.
    (NOTE: Requires gem5 runs to be run with ContextSwitchStatsDump
    enabled and some patches applied to the target Linux kernel.)

    Visit http://www.gem5.org/Streamline for more details.

    APC project generation based on Gator v17 (DS-5 v5.17)
    Subsequent versions should be backward compatible
    """)

parser.add_argument("stat_config_file", metavar="<stat_config.ini>",
                    help=".ini file that describes which stats to be included \
                    in conversion. Sample .ini files can be found in \
                    util/streamline. NOTE: this is NOT the gem5 config.ini \
                    file.")

parser.add_argument("input_path", metavar="<gem5 run folder>",
                    help="Path to gem5 run folder (must contain config.ini, \
                    stats.txt[.gz], and system.tasks.txt.)")

parser.add_argument("output_path", metavar="<dest .apc folder>",
                    help="Destination .apc folder path")

parser.add_argument("--num-events", action="store", type=int,
                    default=1000000,
                    help="Maximum number of scheduling (context switch) \
                    events to be processed. Set to truncate early. \
                    Default=1000000")

parser.add_argument("--gzipped-bmp-not-supported", action="store_true",
                    help="Do not use gzipped .bmp files for visual annotations. \
                    This option is only required when using Streamline versions \
                    older than 5.14")

parser.add_argument("--verbose", action="store_true",
                    help="Enable verbose output")

args = parser.parse_args()

# The destination must look like a Streamline .apc bundle.
if not re.match("(.*)\.apc", args.output_path):
    print "ERROR: <dest .apc folder> should end with '.apc'!"
    sys.exit(1)

# gzipped BMP files for visual annotation is supported in Streamline 5.14.
# Setting this to True will significantly compress the .apc binary file that
# includes frame buffer snapshots.
gzipped_bmp_supported = not args.gzipped_bmp_not_supported

# gem5 ticks per nanosecond; filled in later from the stats dump.
ticks_in_ns = -1

# Default max # of events. Increase this for longer runs.
num_events = args.num_events

# First/last tick observed in the task dump (set by parseProcessInfo).
start_tick = -1
end_tick = -1
# Parse gem5 config.ini file to determine some system configurations.
# Number of CPUs, L2s, etc.
def parseConfig(config_file):
    """Parse gem5's config.ini and set the num_cpus / num_l2 globals.

    Returns the tuple (num_cpus, num_l2). Exits if the file is missing.
    """
    global num_cpus, num_l2

    print "\n==============================="
    print "Parsing gem5 config.ini file..."
    print config_file
    print "===============================\n"
    config = ConfigParser()
    if not config.read(config_file):
        print "ERROR: config file '", config_file, "' not found"
        sys.exit(1)

    # A section named exactly "system.cpu" means a single-CPU run;
    # otherwise CPUs are numbered system.cpu0, system.cpu1, ...
    if config.has_section("system.cpu"):
        num_cpus = 1
    else:
        num_cpus = 0
        while config.has_section("system.cpu" + str(num_cpus)):
            num_cpus += 1

    # Same naming convention for L2 caches.
    if config.has_section("system.l2"):
        num_l2 = 1
    else:
        num_l2 = 0
        while config.has_section("system.l2" + str(num_l2)):
            num_l2 += 1

    print "Num CPUs:", num_cpus
    print "Num L2s:", num_l2
    print ""

    return (num_cpus, num_l2)
# tgid -> parent process Task, pid -> thread Task, plus ordered process list
# (all populated by parseProcessInfo).
process_dict = {}
thread_dict = {}

process_list = []

# uids of the special idle/kernel tasks; -1 until discovered in the dump.
idle_uid = -1
kernel_uid = -1
class Task(object):
    """A process or thread observed in the gem5 task dump.

    Tasks with pid 0 (the idle task) or pid -1 (the kernel) collapse
    onto the reserved uid 0; all other tasks keep the uid they are given.
    """

    def __init__(self, uid, pid, tgid, task_name, is_process, tick):
        self.uid = 0 if pid in (0, -1) else uid
        self.pid = pid
        self.tgid = tgid
        self.is_process = is_process
        self.task_name = task_name
        self.children = []
        # time this task first appeared
        self.tick = tick
class Event(object):
    """A context-switch event: *task* started running at time *tick*."""

    def __init__(self, tick, task):
        self.tick = tick  # gem5 tick at which the switch occurred
        self.task = task  # the Task that was scheduled in
############################################################
# Types used in APC Protocol
# - packed32, packed64
# - int32
# - string
############################################################
def packed32(x):
    """Encode the signed integer *x* as a variable-length (LEB128-style)
    list of byte values: 7 data bits per byte, MSB set on continuation."""
    out = []
    while True:
        low = x & 0x7f
        x = x >> 7
        # Stop once the remaining bits are pure sign extension of bit 6.
        done = ((x == 0 and (low & 0x40) == 0) or
                (x == -1 and (low & 0x40) != 0))
        if done:
            out.append(low)
            return out
        out.append(low | 0x80)
# For historical reasons, 32/64-bit versions of functions are presevered
def packed64(x):
    """Alias of packed32: Python ints are arbitrary precision, so the
    32- and 64-bit encoders are identical (kept for protocol symmetry)."""
    return packed32(x)
# variable length packed 4-byte signed value
def unsigned_packed32(x):
    """Encode *x* as an unsigned variable-length 4-byte value: 1-5 bytes,
    7 data bits each (last byte caps at 4 bits), MSB marks continuation."""
    # All five possible 7-bit chunks; the final chunk keeps only 4 bits.
    chunks = [x & 0x7f,
              (x >> 7) & 0x7f,
              (x >> 14) & 0x7f,
              (x >> 21) & 0x7f,
              (x >> 28) & 0x0f]
    if (x & 0xffffff80) == 0:
        n = 1
    elif (x & 0xffffc000) == 0:
        n = 2
    elif (x & 0xffe00000) == 0:
        n = 3
    elif (x & 0xf0000000) == 0:
        n = 4
    else:
        n = 5
    return [c | 0x80 for c in chunks[:n - 1]] + [chunks[n - 1]]
# variable length packed 8-byte signed value
def unsigned_packed64(x):
    """Encode *x* as an unsigned variable-length 8-byte value: 1-10 bytes,
    7 data bits each, MSB marks continuation."""
    # Mask i covers everything above the low 7*(i+1) bits; the first mask
    # that x does not touch fixes the encoded length at i+1 bytes.
    masks = [0xffffffffffffff80, 0xffffffffffffc000, 0xffffffffffe00000,
             0xfffffffff0000000, 0xfffffff800000000, 0xfffffc0000000000,
             0xfffe000000000000, 0xff00000000000000, 0x8000000000000000]
    n = 10
    for i, mask in enumerate(masks):
        if (x & mask) == 0:
            n = i + 1
            break
    out = [((x >> (7 * i)) | 0x80) & 0xff for i in range(n - 1)]
    out.append((x >> (7 * (n - 1))) & 0x7f)
    return out
# 4-byte signed little endian
def int32(x):
    """Encode *x* as 4 little-endian byte values."""
    return [(x >> shift) & 0xff for shift in (0, 8, 16, 24)]
# 2-byte signed little endian
def int16(x):
    """Encode *x* as 2 little-endian byte values."""
    return [(x >> shift) & 0xff for shift in (0, 8)]
# a packed32 length followed by the specified number of characters
def stringList(x):
    """Encode *x* as a packed32 length prefix followed by its characters."""
    return packed32(len(x)) + list(x)
def utf8StringList(x):
    """Return the ordinal of every character of *x* (no length prefix).

    NOTE(review): despite the name this is a plain ord() per character,
    not a real UTF-8 encoding -- only equivalent for ASCII input.
    """
    return [ord(ch) for ch in x]
# packed64 time value in nanoseconds relative to the uptime from the
# Summary message.
def timestampList(x):
    """Encode a nanosecond timestamp (relative to the Summary uptime)
    as a packed64 value."""
    ret = packed64(x)
    return ret
############################################################
# Write binary
############################################################
def writeBinary(outfile, binary_list):
    """Write each integer in *binary_list* to *outfile* as one character."""
    outfile.write("".join(["%c" % b for b in binary_list]))
############################################################
# APC Protocol Frame Types
############################################################
def addFrameHeader(frame_type, body, core):
    """Prefix *body* with an APC frame header.

    The header is: int32 length of (code + core + body), then the packed32
    frame-type code, then the packed32 core number. Exits on an unknown
    frame type name.
    """
    ret = []
    # Map the human-readable frame type to its APC protocol code.
    if frame_type == "Summary":
        code = 1
    elif frame_type == "Backtrace":
        code = 2
    elif frame_type == "Name":
        code = 3
    elif frame_type == "Counter":
        code = 4
    elif frame_type == "Block Counter":
        code = 5
    elif frame_type == "Annotate":
        code = 6
    elif frame_type == "Sched Trace":
        code = 7
    elif frame_type == "GPU Trace":
        code = 8
    elif frame_type == "Idle":
        code = 9
    else:
        print "ERROR: Unknown frame type:", frame_type
        sys.exit(1)

    packed_code = packed32(code)
    packed_core = packed32(core)
    # Length counts everything after the length field itself.
    length = int32(len(packed_code) + len(packed_core) + len(body))
    ret = length + packed_code + packed_core + body
    return ret
# Summary frame
# - timestamp: packed64
# - uptime: packed64
def summaryFrame(timestamp, uptime):
    """Build a Summary frame: newline canary, timestamp and uptime
    (both packed64), zero monotonic delta, empty attribute list."""
    canary = stringList("1\n2\r\n3\r4\n\r5")
    payload = canary + packed64(timestamp) + packed64(uptime)
    payload += packed64(0) + stringList("")
    return addFrameHeader("Summary", payload, 0)
# Backtrace frame
# - not implemented yet
def backtraceFrame():
    """Placeholder: backtrace frames are not implemented."""
    pass
# Cookie name message
# - cookie: packed32
# - name: string
def cookieNameFrame(cookie, name):
    """Cookie name message (code 1): cookie as packed32, then the name."""
    payload = packed32(1) + packed32(cookie) + stringList(name)
    return addFrameHeader("Name", payload, 0)
# Thread name message
# - timestamp: timestamp
# - thread id: packed32
# - name: string
def threadNameFrame(timestamp, thread_id, name):
    """Thread name message (code 2): timestamp, thread id, name."""
    payload = packed32(2) + timestampList(timestamp)
    payload += packed32(thread_id) + stringList(name)
    return addFrameHeader("Name", payload, 0)
# Core name message
# - name: string
# - core_id: packed32
# - cpuid: packed32
def coreNameFrame(name, core_id, cpuid):
    """Core name message (code 3): core id, cpuid, then the core name."""
    payload = packed32(3) + packed32(core_id) + packed32(cpuid) + stringList(name)
    return addFrameHeader("Name", payload, 0)
# IRQ Cookie name message
# - cookie: packed32
# - name: string
# - irq: packed32
def irqCookieNameFrame(cookie, name, irq):
    """IRQ cookie name message (code 5): cookie, name, then irq number."""
    payload = packed32(5) + packed32(cookie) + stringList(name) + packed32(irq)
    return addFrameHeader("Name", payload, 0)
# Counter frame message
# - timestamp: timestamp
# - core: packed32
# - key: packed32
# - value: packed64
def counterFrame(timestamp, core, key, value):
    """Counter message: timestamp, core, counter key, packed64 value."""
    payload = timestampList(timestamp) + packed32(core) + packed32(key)
    payload += packed64(value)
    return addFrameHeader("Counter", payload, core)
# Block Counter frame message
# - key: packed32
# - value: packed64
def blockCounterFrame(core, key, value):
    """Block Counter message: counter key and packed64 value (no timestamp)."""
    payload = packed32(key) + packed64(value)
    return addFrameHeader("Block Counter", payload, core)
# Annotate frame messages
# - core: packed32
# - tid: packed32
# - timestamp: timestamp
# - size: packed32
# - body
def annotateFrame(core, tid, timestamp, size, userspace_body):
    """Annotate message: core, tid, timestamp, payload size, then the
    already-encoded userspace payload bytes."""
    payload = packed32(core) + packed32(tid) + timestampList(timestamp)
    payload += packed32(size) + userspace_body
    return addFrameHeader("Annotate", payload, core)
# Scheduler Trace frame messages
# Sched Switch
# - Code: 1
# - timestamp: timestamp
# - pid: packed32
# - tid: packed32
# - cookie: packed32
# - state: packed32
def schedSwitchFrame(core, timestamp, pid, tid, cookie, state):
    """Sched Switch message (code 1): timestamp, pid, tid, cookie, state."""
    payload = packed32(1) + timestampList(timestamp) + packed32(pid)
    payload += packed32(tid) + packed32(cookie) + packed32(state)
    return addFrameHeader("Sched Trace", payload, core)
# Sched Thread Exit
# - Code: 2
# - timestamp: timestamp
# - tid: packed32
def schedThreadExitFrame(core, timestamp, pid, tid, cookie, state):
    """Sched Thread Exit message (code 2): timestamp and tid only.

    pid, cookie and state are accepted for signature symmetry with
    schedSwitchFrame but are not encoded.
    """
    payload = packed32(2) + timestampList(timestamp) + packed32(tid)
    return addFrameHeader("Sched Trace", payload, core)
# GPU Trace frame messages
# - Not implemented yet
def gpuTraceFrame():
    """Placeholder: GPU trace frames are not implemented."""
    pass
# Idle frame messages
# Enter Idle
# - code: 1
# - timestamp: timestamp
# - core: packed32
def enterIdleFrame(timestamp, core):
    """Idle message (code 1): *core* entered idle at *timestamp*."""
    payload = packed32(1) + timestampList(timestamp) + packed32(core)
    return addFrameHeader("Idle", payload, core)
# Exit Idle
# - code: 2
# - timestamp: timestamp
# - core: packed32
def exitIdleFrame(timestamp, core):
    """Idle message (code 2): *core* left idle at *timestamp*."""
    payload = packed32(2) + timestampList(timestamp) + packed32(core)
    return addFrameHeader("Idle", payload, core)
####################################################################
def parseProcessInfo(task_file):
    """Parse the gem5 context-switch task dump (system.tasks.txt[.gz]).

    Populates the module-level process/thread dictionaries, the per-CPU
    and unified event lists, and the start/end tick globals. Processing
    stops after num_events scheduling events.
    """
    print "\n==============================="
    print "Parsing Task file..."
    print task_file
    print "===============================\n"

    global start_tick, end_tick, num_cpus
    global process_dict, thread_dict, process_list
    global event_list, unified_event_list
    global idle_uid, kernel_uid

    event_list = []
    unified_event_list = []
    for cpu in range(num_cpus):
        event_list.append([])

    uid = 1  # uid 0 is reserved for idle

    # Dummy Tasks for frame buffers and system diagrams
    process = Task(uid, 9999, 9999, "framebuffer", True, 0)
    process_list.append(process)
    uid += 1
    thread = Task(uid, 9999, 9999, "framebuffer", False, 0)
    process.children.append(thread)
    uid += 1
    process = Task(uid, 9998, 9998, "System", True, 0)
    process_list.append(process)
    # if we don't find the real kernel, use this to keep things going
    kernel_uid = uid
    uid += 1
    thread = Task(uid, 9998, 9998, "System", False, 0)
    process.children.append(thread)
    uid += 1

    ext = os.path.splitext(task_file)[1]
    try:
        if ext == ".gz":
            process_file = gzip.open(task_file, 'rb')
        else:
            process_file = open(task_file, 'rb')
    except:
        print "ERROR opening task file:", task_file
        print "Make sure context switch task dumping is enabled in gem5."
        sys.exit(1)

    # One line per context switch emitted by the patched kernel/gem5.
    process_re = re.compile("tick=(\d+)\s+(\d+)\s+cpu_id=(\d+)\s+" +
        "next_pid=([-\d]+)\s+next_tgid=([-\d]+)\s+next_task=(.*)")

    task_name_failure_warned = False

    for line in process_file:
        match = re.match(process_re, line)
        if match:
            tick = int(match.group(1))
            if (start_tick < 0):
                start_tick = tick
            cpu_id = int(match.group(3))
            pid = int(match.group(4))
            tgid = int(match.group(5))
            task_name = match.group(6)

            # Warn once if the kernel patch for task names is missing.
            if not task_name_failure_warned:
                if task_name == "FailureIn_curTaskName":
                    print "-------------------------------------------------"
                    print "WARNING: Task name not set correctly!"
                    print "Process/Thread info will not be displayed correctly"
                    print "Perhaps forgot to apply m5struct.patch to kernel?"
                    print "-------------------------------------------------"
                    task_name_failure_warned = True

            # First sighting of this thread group: create the parent process.
            if not tgid in process_dict:
                if tgid == pid:
                    # new task is parent as well
                    if args.verbose:
                        print "new process", uid, pid, tgid, task_name
                    if tgid == 0:
                        # new process is the "idle" task
                        process = Task(uid, pid, tgid, "idle", True, tick)
                        idle_uid = 0
                    else:
                        process = Task(uid, pid, tgid, task_name, True, tick)
                else:
                    if tgid == 0:
                        process = Task(uid, tgid, tgid, "idle", True, tick)
                        idle_uid = 0
                    else:
                        # parent process name not known yet
                        process = Task(uid, tgid, tgid, "_Unknown_", True, tick)
                if tgid == -1:  # kernel
                    kernel_uid = 0
                uid += 1
                process_dict[tgid] = process
                process_list.append(process)
            else:
                if tgid == pid:
                    # Fill in (or refresh) the parent's name now that the
                    # group leader itself has been seen.
                    if process_dict[tgid].task_name == "_Unknown_":
                        if args.verbose:
                            print "new process", \
                                process_dict[tgid].uid, pid, tgid, task_name
                        process_dict[tgid].task_name = task_name
                    if process_dict[tgid].task_name != task_name and tgid != 0:
                        process_dict[tgid].task_name = task_name

            # First sighting of this thread: attach it to its process.
            if not pid in thread_dict:
                if args.verbose:
                    print "new thread", \
                        uid, process_dict[tgid].uid, pid, tgid, task_name
                thread = Task(uid, pid, tgid, task_name, False, tick)
                uid += 1
                thread_dict[pid] = thread
                process_dict[tgid].children.append(thread)
            else:
                if thread_dict[pid].task_name != task_name:
                    thread_dict[pid].task_name = task_name

            if args.verbose:
                print tick, uid, cpu_id, pid, tgid, task_name

            task = thread_dict[pid]
            event = Event(tick, task)
            event_list[cpu_id].append(event)
            unified_event_list.append(event)

            if len(unified_event_list) == num_events:
                print "Truncating at", num_events, "events!"
                break

    print "Found %d events." % len(unified_event_list)

    for process in process_list:
        if process.pid > 9990:  # fix up framebuffer ticks
            process.tick = start_tick
        print process.uid, process.pid, process.tgid, \
            process.task_name, str(process.tick)
        for thread in process.children:
            if thread.pid > 9990:
                thread.tick = start_tick
            print "\t", thread.uid, thread.pid, thread.tgid, \
                thread.task_name, str(thread.tick)

    end_tick = tick

    print "Start tick:", start_tick
    print "End tick: ", end_tick
    print ""

    return
def initOutput(output_path):
    """Create the output (.apc) directory if it does not already exist."""
    if os.path.exists(output_path):
        return
    os.mkdir(output_path)
def ticksToNs(tick):
    """Convert gem5 ticks to nanoseconds via the global ticks_in_ns.

    Exits if ticks_in_ns has not been initialized yet.
    """
    if ticks_in_ns < 0:
        print "ticks_in_ns not set properly!"
        sys.exit(1)

    # NOTE(review): integer (floor) division under Python 2 -- sub-ns
    # precision is truncated.
    return tick / ticks_in_ns
def writeXmlFile(xml, filename):
    """Pretty-print the ElementTree element *xml* into *filename*."""
    raw = ET.tostring(xml)
    pretty = minidom.parseString(raw).toprettyxml()
    with open(filename, "w") as out:
        out.write(pretty)
# StatsEntry that contains individual statistics
class StatsEntry(object):
    """A single gem5 statistic exported as a Streamline counter.

    Holds the regexes used to locate the stat in stats.txt, Streamline
    grouping metadata (group, key), and the per-timestamp values collected
    while parsing.  The per-CPU setup reads the module-global num_cpus.
    """

    def __init__(self, name, group, group_index, per_cpu, per_switchcpu, key):
        # Full name of statistics
        self.name = name
        # Streamline group name that statistic will belong to
        self.group = group
        # Index of statistics within group (used to change colors within groups)
        self.group_index = group_index
        # Shorter name with "system" stripped off
        # and symbols converted to alphanumerics
        self.short_name = re.sub("system\.", "", name)
        # BUGFIX: substitute on the already-stripped short_name, not on the
        # original name -- the old code discarded the "system." strip above.
        self.short_name = re.sub(":", "_", self.short_name)
        # Regex for this stat (string version used to construct union regex)
        self.regex_string = "^" + name + "\s+([\d\.]+)"
        self.regex = re.compile("^" + name + "\s+([\d\.e\-]+)\s+# (.*)$", re.M)
        # Human-readable description, filled in from the stats file comment.
        self.description = ""
        # Whether this stat is use per CPU or not
        self.per_cpu = per_cpu
        self.per_switchcpu = per_switchcpu
        # Key used in .apc protocol (as described in captured.xml)
        self.key = key
        # List of values of stat per timestamp
        self.values = []
        # Whether this stat has been found for the current timestamp
        self.found = False
        # Whether this stat has been found at least once
        # (to suppress too many warnings)
        self.not_found_at_least_once = False
        # Field used to hold ElementTree subelement for this stat
        self.ET_element = None
        # Create per-CPU stat name and regex, etc.
        if self.per_cpu:
            self.per_cpu_regex_string = []
            self.per_cpu_regex = []
            self.per_cpu_name = []
            self.per_cpu_found = []
            for i in range(num_cpus):
                # Resuming from checkpoints results in using "switch_cpus"
                if per_switchcpu:
                    per_cpu_name = "system.switch_cpus"
                else:
                    per_cpu_name = "system.cpu"
                # No CPU number appends if num_cpus == 1
                if num_cpus > 1:
                    per_cpu_name += str(i)
                per_cpu_name += "." + self.name
                self.per_cpu_name.append(per_cpu_name)
                # write() instead of a py2 print statement: byte-identical
                # output, and keeps this class importable under Python 3 too.
                sys.stdout.write("\t %s\n" % per_cpu_name)
                self.per_cpu_regex_string.\
                    append("^" + per_cpu_name + "\s+[\d\.]+")
                self.per_cpu_regex.append(re.compile("^" + per_cpu_name + \
                    "\s+([\d\.e\-]+)\s+# (.*)$", re.M))
                self.values.append([])
                self.per_cpu_found.append(False)

    def append_value(self, val, per_cpu_index = None):
        """Record one value (stringified) for the current dump window."""
        if self.per_cpu:
            self.values[per_cpu_index].append(str(val))
        else:
            self.values.append(str(val))
# Global stats object that contains the list of stats entries
# and other utility functions
class Stats(object):
def __init__(self):
self.stats_list = []
self.tick_list = []
self.next_key = 1
def register(self, name, group, group_index, per_cpu, per_switchcpu):
print "registering stat:", name, "group:", group, group_index
self.stats_list.append(StatsEntry(name, group, group_index, per_cpu, \
per_switchcpu, self.next_key))
self.next_key += 1
# Union of all stats to accelerate parsing speed
def createStatsRegex(self):
regex_strings = [];
print "\nnum entries in stats_list", len(self.stats_list)
for entry in self.stats_list:
if entry.per_cpu:
for i in range(num_cpus):
regex_strings.append(entry.per_cpu_regex_string[i])
else:
regex_strings.append(entry.regex_string)
self.regex = re.compile('|'.join(regex_strings))
def registerStats(config_file):
    """Parse the stats config.ini file and build a Stats registry.

    Reads four sections (PER_CPU_STATS, PER_SWITCHCPU_STATS, PER_L2_STATS,
    OTHER_STATS); each option is a newline-separated list of stat names,
    registered one StatsEntry each.  Exits if the file cannot be read.
    Reads the module-global num_l2.
    """
    print "==============================="
    print "Parsing stats config.ini file..."
    print config_file
    print "==============================="
    config = ConfigParser()
    if not config.read(config_file):
        print "ERROR: config file '", config_file, "' not found!"
        sys.exit(1)
    print "\nRegistering Stats..."
    stats = Stats()
    # Stats recorded once per CPU (expanded to system.cpu<N>.<stat>).
    per_cpu_stat_groups = config.options('PER_CPU_STATS')
    for group in per_cpu_stat_groups:
        i = 0
        per_cpu_stats_list = config.get('PER_CPU_STATS', group).split('\n')
        for item in per_cpu_stats_list:
            if item:
                stats.register(item, group, i, True, False)
                i += 1
    # Stats recorded per switch-CPU (used when resuming from checkpoints).
    per_cpu_stat_groups = config.options('PER_SWITCHCPU_STATS')
    for group in per_cpu_stat_groups:
        i = 0
        per_cpu_stats_list = \
            config.get('PER_SWITCHCPU_STATS', group).split('\n')
        for item in per_cpu_stats_list:
            if item:
                stats.register(item, group, i, True, True)
                i += 1
    # L2 stats: each name is expanded once per L2 cache (system.l2<N>.<stat>).
    per_l2_stat_groups = config.options('PER_L2_STATS')
    for group in per_l2_stat_groups:
        i = 0
        per_l2_stats_list = config.get('PER_L2_STATS', group).split('\n')
        for item in per_l2_stats_list:
            if item:
                for l2 in range(num_l2):
                    name = item
                    prefix = "system.l2"
                    # No L2 number appended when there is only one L2.
                    if num_l2 > 1:
                        prefix += str(l2)
                    prefix += "."
                    name = prefix + name
                    stats.register(name, group, i, False, False)
                i += 1
    # Remaining (global) stats, registered verbatim.
    other_stat_groups = config.options('OTHER_STATS')
    for group in other_stat_groups:
        i = 0
        other_stats_list = config.get('OTHER_STATS', group).split('\n')
        for item in other_stats_list:
            if item:
                stats.register(item, group, i, False, False)
                i += 1
    stats.createStatsRegex()
    return stats
# Parse and read in gem5 stats file
# Streamline counters are organized per CPU
def readGem5Stats(stats, gem5_stats_file):
print "\n==============================="
print "Parsing gem5 stats file..."
print gem5_stats_file
print "===============================\n"
ext = os.path.splitext(gem5_stats_file)[1]
window_start_regex = \
re.compile("^---------- Begin Simulation Statistics ----------")
window_end_regex = \
re.compile("^---------- End Simulation Statistics ----------")
final_tick_regex = re.compile("^final_tick\s+(\d+)")
global ticks_in_ns
sim_freq_regex = re.compile("^sim_freq\s+(\d+)")
sim_freq = -1
try:
if ext == ".gz":
f = gzip.open(gem5_stats_file, "r")
else:
f = open(gem5_stats_file, "r")
except:
print "ERROR opening stats file", gem5_stats_file, "!"
sys.exit(1)
stats_not_found_list = stats.stats_list[:]
window_num = 0
while (True):
error = False
try:
line = f.readline()
except IOError:
print ""
print "WARNING: IO error in stats file"
print "(gzip stream not closed properly?)...continuing for now"
error = True
if not line:
break
# Find out how many gem5 ticks in 1ns
if sim_freq < 0:
m = sim_freq_regex.match(line)
if m:
sim_freq = int(m.group(1)) # ticks in 1 sec
ticks_in_ns = int(sim_freq / 1e9)
print "Simulation frequency found! 1 tick == %e sec\n" \
% (1.0 / sim_freq)
# Final tick in gem5 stats: current absolute timestamp
m = final_tick_regex.match(line)
if m:
tick = int(m.group(1))
if tick > end_tick:
break
stats.tick_list.append(tick)
if (window_end_regex.match(line) or error):
if args.verbose:
print "new window"
for stat in stats.stats_list:
if stat.per_cpu:
for i in range(num_cpus):
if not stat.per_cpu_found[i]:
if not stat.not_found_at_least_once:
print "WARNING: stat not found in window #", \
window_num, ":", stat.per_cpu_name[i]
print "suppressing further warnings for " + \
"this stat"
stat.not_found_at_least_once = True
stat.values[i].append(str(0))
stat.per_cpu_found[i] = False
else:
if not stat.found:
if not stat.not_found_at_least_once:
print "WARNING: stat not found in window #", \
window_num, ":", stat.name
print "suppressing further warnings for this stat"
stat.not_found_at_least_once = True
stat.values.append(str(0))
stat.found = False
stats_not_found_list = stats.stats_list[:]
window_num += 1
if error:
break
# Do a single regex of the union of all stats first for speed
if stats.regex.match(line):
# Then loop through only the stats we haven't seen in this window
for stat in stats_not_found_list[:]:
if stat.per_cpu:
for i in range(num_cpus):
m = stat.per_cpu_regex[i].match(line)
if m:
if stat.name == "ipc":
value = str(int(float(m.group(1)) * 1000))
else:
value = str(int(float(m.group(1))))
if args.verbose:
print stat.per_cpu_name[i], value
stat.values[i].append(value)
stat.per_cpu_found[i] = True
all_found = True
for j in range(num_cpus):
if not stat.per_cpu_found[j]:
all_found = False
if all_found:
stats_not_found_list.remove(stat)
if stat.description == "":
stat.description = m.group(2)
else:
m = stat.regex.match(line)
if m:
value = str(int(float(m.group(1))))
if args.verbose:
print stat.name, value
stat.values.append(value)
stat.found = True
stats_not_found_list.remove(stat)
if stat.description == "":
stat.description = m.group(2)
f.close()
# Create session.xml file in .apc folder
def doSessionXML(output_path):
    """Write the Streamline session.xml descriptor into the .apc folder."""
    session_file = output_path + "/session.xml"
    xml = ET.Element("session")
    # Fixed session attributes.  Duration is zero for now; it does not
    # affect visualization.
    attributes = [
        ("version", "1"),
        ("call_stack_unwinding", "no"),
        ("parse_debug_info", "no"),
        ("high_resolution", "yes"),
        ("buffer_mode", "streaming"),
        ("sample_rate", "low"),
        ("duration", "0"),
        ("target_host", ""),
        ("target_port", "8080"),
    ]
    for key, value in attributes:
        xml.set(key, value)
    writeXmlFile(xml, session_file)
# Create captured.xml file in .apc folder
def doCapturedXML(output_path, stats):
    """Write captured.xml: declares the capture target (gem5, num_cpus
    cores) and one <counter> element per registered stat, keyed by the
    stat's .apc protocol key."""
    captured_file = output_path + "/captured.xml"
    root = ET.Element("captured")
    for attr, val in [("version", "1"),
                      ("protocol", "17"),
                      ("backtrace_processing", "none")]:
        root.set(attr, val)
    target = ET.SubElement(root, "target")
    for attr, val in [("name", "gem5"),
                      ("sample_rate", "1000"),
                      ("cores", str(num_cpus))]:
        target.set(attr, val)
    counters = ET.SubElement(root, "counters")
    for stat in stats.stats_list:
        counter = ET.SubElement(counters, "counter")
        # Counter names must not contain dots.
        stat_name = re.sub("\.", "_", stat.short_name)
        counter.set("title", stat.group)
        counter.set("name", stat_name)
        counter.set("color", "0x00000000")
        counter.set("key", "0x%08x" % stat.key)
        counter.set("type", stat_name)
        counter.set("event", "0x00000000")
        counter.set("per_cpu", "yes" if stat.per_cpu else "no")
        counter.set("display", "")
        counter.set("units", "")
        counter.set("average_selection", "no")
        counter.set("description", stat.description)
    writeXmlFile(root, captured_file)
# Writes out Streamline cookies (unique IDs per process/thread)
def writeCookiesThreads(blob):
    """Emit cookie-name frames for processes and thread-name frames for
    threads (sorted by timestamp).  Reads the module-global process_list.
    """
    thread_list = []
    for process in process_list:
        if process.uid > 0:
            print "cookie", process.task_name, process.uid
            writeBinary(blob, cookieNameFrame(process.uid, process.task_name))
        # pid and tgid need to be positive values -- no longer true?
        for thread in process.children:
            thread_list.append(thread)
    # Threads need to be sorted in timestamp order
    thread_list.sort(key = lambda x: x.tick)
    for thread in thread_list:
        print "thread", thread.task_name, (ticksToNs(thread.tick)),\
            thread.tgid, thread.pid
        writeBinary(blob, threadNameFrame(ticksToNs(thread.tick),\
            thread.pid, thread.task_name))
# Writes context switch info as Streamline scheduling events
def writeSchedEvents(blob):
for cpu in range(num_cpus):
for event in event_list[cpu]:
timestamp = ticksToNs(event.tick)
pid = event.task.tgid
tid = event.task.pid
if process_dict.has_key(event.task.tgid):
cookie = process_dict[event.task.tgid].uid
else:
cookie = 0
# State:
# 0: waiting on other event besides I/O
# 1: Contention/pre-emption
# 2: Waiting on I/O
# 3: Waiting on mutex
# Hardcoding to 0 for now. Other states not implemented yet.
state = 0
if args.verbose:
print cpu, timestamp, pid, tid, cookie
writeBinary(blob,\
schedSwitchFrame(cpu, timestamp, pid, tid, cookie, state))
# Writes selected gem5 statistics as Streamline counters
def writeCounters(blob, stats):
    """Emit one counter frame per (timestamp, stat[, cpu]) combination,
    clamped to the module-global end_tick.
    """
    timestamp_list = []
    for tick in stats.tick_list:
        if tick > end_tick:
            break
        timestamp_list.append(ticksToNs(tick))
    # NOTE: a loop that computed an unused 'stat_length' per stat used to
    # live here; it had no effect and has been removed.
    for n in range(len(timestamp_list)):
        for stat in stats.stats_list:
            if stat.per_cpu:
                for i in range(num_cpus):
                    writeBinary(blob, counterFrame(timestamp_list[n], i, \
                        stat.key, int(float(stat.values[i][n]))))
            else:
                writeBinary(blob, counterFrame(timestamp_list[n], 0, \
                    stat.key, int(float(stat.values[n]))))
# Streamline can display LCD frame buffer dumps (gzipped bmp)
# This function converts the frame buffer dumps to the Streamline format
def writeVisualAnnotations(blob, input_path, output_path):
    """Convert gzipped framebuffer dumps (fb.<seq>.<tick>.bmp.gz) into
    Streamline visual-annotation frames.  No-op when the frame directory
    does not exist.  Reads the module-globals unified_event_list,
    end_tick and gzipped_bmp_supported.
    """
    frame_path = input_path + "/frames_system.vncserver"
    if not os.path.exists(frame_path):
        return
    frame_count = 0
    file_list = os.listdir(frame_path)
    file_list.sort()
    re_fb = re.compile("fb\.(\d+)\.(\d+)\.bmp.gz")
    # Use first non-negative pid to tag visual annotations
    annotate_pid = -1
    for e in unified_event_list:
        pid = e.task.pid
        if pid >= 0:
            annotate_pid = pid
            break
    for fn in file_list:
        m = re_fb.match(fn)
        if m:
            seq = m.group(1)
            tick = int(m.group(2))
            # File list is sorted by name; stop once past end_tick.
            if tick > end_tick:
                break
            frame_count += 1
            userspace_body = []
            userspace_body += packed32(0x1C) # escape code
            userspace_body += packed32(0x04) # visual code
            text_annotation = "image_" + str(ticksToNs(tick)) + ".bmp.gz"
            userspace_body += int16(len(text_annotation))
            userspace_body += utf8StringList(text_annotation)
            # Depending on the Streamline version, gzipped bmps may be
            # passed through directly or must be decompressed first.
            if gzipped_bmp_supported:
                # copy gzipped bmp directly
                bytes_read = open(frame_path + "/" + fn, "rb").read()
            else:
                # copy uncompressed bmp
                bytes_read = gzip.open(frame_path + "/" + fn, "rb").read()
            userspace_body += int32(len(bytes_read))
            userspace_body += bytes_read
            writeBinary(blob, annotateFrame(0, annotate_pid, ticksToNs(tick), \
                len(userspace_body), userspace_body))
    print "\nfound", frame_count, "frames for visual annotation.\n"
def createApcProject(input_path, output_path, stats):
    """Assemble the Streamline .apc project: the binary capture blob
    ("0000000000") plus session.xml and captured.xml.
    """
    initOutput(output_path)
    blob = open(output_path + "/0000000000", "wb")
    # Summary frame takes current system time and system uptime.
    # Filling in with random values for now.
    writeBinary(blob, summaryFrame(1234, 5678))
    writeCookiesThreads(blob)
    print "writing Events"
    writeSchedEvents(blob)
    print "writing Counters"
    writeCounters(blob, stats)
    print "writing Visual Annotations"
    writeVisualAnnotations(blob, input_path, output_path)
    doSessionXML(output_path)
    doCapturedXML(output_path, stats)
    blob.close()
#######################
# Main Routine
# (top-level script body: args was produced by the argument parser above)
input_path = args.input_path
output_path = args.output_path
####
# Make sure input path exists
####
if not os.path.exists(input_path):
    print "ERROR: Input path %s does not exist!" % input_path
    sys.exit(1)
####
# Parse gem5 configuration file to find # of CPUs and L2s
####
(num_cpus, num_l2) = parseConfig(input_path + "/config.ini")
####
# Parse task file to find process/thread info
####
parseProcessInfo(input_path + "/system.tasks.txt")
####
# Parse stat config file and register stats
####
stat_config_file = args.stat_config_file
stats = registerStats(stat_config_file)
####
# Parse gem5 stats
####
# Check if both stats.txt and stats.txt.gz exist and warn if both exist
if os.path.exists(input_path + "/stats.txt") and \
        os.path.exists(input_path + "/stats.txt.gz"):
    print "WARNING: Both stats.txt.gz and stats.txt exist. \
Using stats.txt.gz by default."
# Prefer the compressed file; fall back to the plain one.
gem5_stats_file = input_path + "/stats.txt.gz"
if not os.path.exists(gem5_stats_file):
    gem5_stats_file = input_path + "/stats.txt"
if not os.path.exists(gem5_stats_file):
    print "ERROR: stats.txt[.gz] file does not exist in %s!" % input_path
    sys.exit(1)
readGem5Stats(stats, gem5_stats_file)
####
# Create Streamline .apc project folder
####
createApcProject(input_path, output_path, stats)
print "All done!"
| bsd-3-clause |
MarsCarl/ucore_lab | related_info/lab7/semaphore_condition/thr-ex1.py | 48 | 1026 | #!/bin/env python
# -*- coding: utf-8 -*-
#filename: peartest.py
import threading, signal
# Shared shutdown flag: set by the signal handler, polled by the workers.
is_exit = False
def doStress(i, cc):
global is_exit
idx = i
while not is_exit:
if (idx < 10000000):
print "thread[%d]: idx=%d"%(i, idx)
idx = idx + cc
else:
break
if is_exit:
print "receive a signal to exit, thread[%d] stop."%i
else:
print "thread[%d] complete."%i
def handler(signum, frame):
    """Signal handler (SIGINT/SIGTERM): ask all workers to stop by
    raising the shared is_exit flag."""
    global is_exit
    is_exit = True
    print "receive a signal %d, is_exit = %d"%(signum, is_exit)
if __name__ == "__main__":
    # Install handlers so Ctrl-C / TERM raise the shared exit flag.
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
    cc = 5
    threads = []
    # Start cc daemon workers; each counts i, i+cc, i+2*cc, ...
    for i in range(cc):
        t = threading.Thread(target=doStress, args=(i,cc))
        t.setDaemon(True)
        threads.append(t)
        t.start()
    # Poll until all workers finish: the main thread avoids a plain
    # join() so that signals can still be delivered to it.
    # NOTE(review): this loop spins at full CPU -- a short sleep or a
    # join() with timeout would be kinder; confirm before changing.
    while 1:
        alive = False
        for i in range(cc):
            alive = alive or threads[i].isAlive()
        if not alive:
            break
| gpl-2.0 |
vnsofthe/odoo-dev | addons/account_budget/__init__.py | 444 | 1097 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_budget
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhaodelong/django | tests/null_queries/tests.py | 290 | 2928 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
    """Regression tests for filtering with None (SQL NULL) in the ORM."""

    def test_none_as_null(self):
        """
        Regression test for the use of None as a query value.
        None is interpreted as an SQL NULL, but only in __exact and __iexact
        queries.
        Set up some initial polls and choices
        """
        p1 = Poll(question='Why?')
        p1.save()
        c1 = Choice(poll=p1, choice='Because.')
        c1.save()
        c2 = Choice(poll=p1, choice='Why Not?')
        c2.save()
        # Exact query with value None returns nothing ("is NULL" in sql,
        # but every 'id' field has a value).
        self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
        # The same behavior for iexact query.
        self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
        # Excluding the previous result returns everything.
        self.assertQuerysetEqual(
            Choice.objects.exclude(choice=None).order_by('id'),
            [
                '<Choice: Choice: Because. in poll Q: Why? >',
                '<Choice: Choice: Why Not? in poll Q: Why? >'
            ]
        )
        # Valid query, but fails because foo isn't a keyword
        self.assertRaises(FieldError, Choice.objects.filter, foo__exact=None)
        # Can't use None on anything other than __exact and __iexact
        self.assertRaises(ValueError, Choice.objects.filter, id__gt=None)
        # Related managers use __exact=None implicitly if the object hasn't been saved.
        p2 = Poll(question="How?")
        self.assertEqual(repr(p2.choice_set.all()), '[]')

    def test_reverse_relations(self):
        """
        Querying across reverse relations and then another relation should
        insert outer joins correctly so as not to exclude results.
        """
        obj = OuterA.objects.create()
        # No Inner rows exist yet: the outer join must still yield OuterA.
        self.assertQuerysetEqual(
            OuterA.objects.filter(inner__third=None),
            ['<OuterA: OuterA object>']
        )
        self.assertQuerysetEqual(
            OuterA.objects.filter(inner__third__data=None),
            ['<OuterA: OuterA object>']
        )
        Inner.objects.create(first=obj)
        self.assertQuerysetEqual(
            Inner.objects.filter(first__inner__third=None),
            ['<Inner: Inner object>']
        )
        # Ticket #13815: check if <reverse>_isnull=False does not produce
        # faulty empty lists
        OuterB.objects.create(data="reverse")
        self.assertQuerysetEqual(
            OuterB.objects.filter(inner__isnull=False),
            []
        )
        Inner.objects.create(first=obj)
        self.assertQuerysetEqual(
            OuterB.objects.exclude(inner__isnull=False),
            ['<OuterB: OuterB object>']
        )
| bsd-3-clause |
Lujeni/ansible | test/units/modules/network/slxos/test_slxos_l2_interface.py | 38 | 5752 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_l2_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosL2InterfaceModule(TestSlxosModule):
    """Unit tests for the slxos_l2_interface module, with device I/O
    (get_config / load_config / run_commands) replaced by fixtures."""

    module = slxos_l2_interface

    def setUp(self):
        """Patch the module's device-facing helpers; run_commands is wired
        to return fixture contents instead of talking to a switch."""
        super(TestSlxosL2InterfaceModule, self).setUp()
        self._patch_get_config = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.get_config'
        )
        self._patch_load_config = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.load_config'
        )
        self._patch_run_commands = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.run_commands'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()
        self._run_commands = self._patch_run_commands.start()
        self._run_commands.side_effect = self.run_commands_load_fixtures

    def run_commands_load_fixtures(self, module, commands, *args, **kwargs):
        """side_effect for run_commands: map each command to fixture data."""
        return self.load_fixtures(
            commands,
            destination=self._run_commands,
            return_values=True
        )

    def tearDown(self):
        # Undo the patches installed in setUp.
        super(TestSlxosL2InterfaceModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()
        self._patch_run_commands.stop()

    def load_fixtures(self, commands=None, destination=None, return_values=False):
        """Load fixture files named after the commands (spaces and slashes
        replaced by underscores).  Either returns the contents
        (return_values=True) or installs them as the mock's side_effect."""
        side_effects = []
        if not destination:
            destination = self._get_config
        if not commands:
            commands = ['slxos_config_config.cfg']
        for command in commands:
            filename = str(command).replace(' ', '_')
            filename = str(filename).replace('/', '_')
            side_effects.append(load_fixture(filename))
        if return_values is True:
            return side_effects
        destination.side_effect = side_effects
        return None

    def test_slxos_l2_interface_access_vlan(self, *args, **kwargs):
        """Setting an existing access VLAN emits the expected commands."""
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=200,
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface ethernet 0/2',
                    'switchport access vlan 200'
                ],
                'changed': True,
                'warnings': []
            }
        )

    def test_slxos_l2_interface_vlan_does_not_exist(self, *args, **kwargs):
        """Referencing a VLAN missing from the switch must fail clearly."""
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=10,
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(
            result,
            {
                'msg': 'You are trying to configure a VLAN on an interface '
                'that\ndoes not exist on the switch yet!',
                'failed': True,
                'vlan': '10'
            }
        )

    def test_slxos_l2_interface_incorrect_state(self, *args, **kwargs):
        """An interface not configured as L2 must produce a helpful error."""
        set_module_args(dict(
            name='Ethernet 0/3',
            mode='access',
            access_vlan=10,
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(
            result,
            {
                'msg': 'Ensure interface is configured to be a L2\nport first '
                'before using this module. You can use\nthe slxos_'
                'interface module for this.',
                'failed': True
            }
        )

    def test_slxos_l2_interface_trunk(self, *args, **kwargs):
        """Trunk mode with native and allowed VLANs emits both commands."""
        set_module_args(dict(
            name='Ethernet 0/4',
            mode='trunk',
            native_vlan='22',
            trunk_allowed_vlans='200,22'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface ethernet 0/4',
                    'switchport trunk allowed vlan add 200,22',
                    'switchport trunk native vlan 22'
                ],
                'changed': True,
                'warnings': []
            }
        )

    def test_slxos_l2_interface_invalid_argument(self, *args, **kwargs):
        """Unknown module parameters are rejected by the argument spec."""
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=10,
            shawshank='Redemption'
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(result['failed'], True)
        self.assertTrue(re.match(
            r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
            'shawshank Supported parameters include: access_vlan, aggregate, '
            'mode, name, native_vlan, state, trunk_allowed_vlans, trunk_vlans',
            result['msg']
        ))
| gpl-3.0 |
sonaht/ansible | lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py | 70 | 6582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright IBM Corp. 2016
# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_local_user_manager
short_description: Manage local users on an ESXi host
description:
- Manage local users on an ESXi host
version_added: "2.2"
author: Andreas Nafpliotis
notes:
- Tested on ESXi 6.0
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit users
requirements:
- "python >= 2.6"
- PyVmomi installed
options:
local_user_name:
description:
- The local user name to be changed
required: True
local_user_password:
description:
- The password to be set
required: False
local_user_description:
description:
- Description for the user
required: False
state:
description:
- Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
choices: ['present', 'absent']
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_user_manager command from Ansible Playbooks
- name: Add local user to ESXi
local_action:
module: vmware_local_user_manager
hostname: esxi_hostname
username: root
password: vmware
local_user_name: foo
'''
RETURN = '''# '''
# pyVmomi is imported optionally so the module can still load and report
# a clean error from main() when the library is missing.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareLocalUserManager(object):
    """Creates, updates or removes a local user on an ESXi host,
    driven by the module's 'state' parameter."""

    def __init__(self, module):
        self.module = module
        self.content = connect_to_api(self.module)
        self.local_user_name = self.module.params['local_user_name']
        self.local_user_password = self.module.params['local_user_password']
        self.local_user_description = self.module.params['local_user_description']
        self.state = self.module.params['state']

    def process_state(self):
        """Dispatch to the handler matching (desired state, current state)."""
        try:
            local_account_manager_states = {
                'absent': {
                    'present': self.state_remove_user,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_user,
                    'absent': self.state_create_user,
                }
            }
            local_account_manager_states[self.state][self.check_local_user_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            # Last-resort guard: surface any unexpected error to Ansible.
            self.module.fail_json(msg=str(e))

    def check_local_user_manager_state(self):
        """Return 'present' if the user exists on the host, else 'absent'."""
        user_account = self.find_user_account()
        if not user_account:
            return 'absent'
        else:
            return 'present'

    def find_user_account(self):
        """Look up the exact user name in the host's user directory;
        returns a (possibly empty) sequence of matches."""
        searchStr = self.local_user_name
        exactMatch = True
        findUsers = True
        findGroups = False
        user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups)
        return user_account

    def create_account_spec(self):
        """Build an AccountSpecification from the module parameters."""
        account_spec = vim.host.LocalAccountManager.AccountSpecification()
        account_spec.id = self.local_user_name
        account_spec.password = self.local_user_password
        account_spec.description = self.local_user_description
        return account_spec

    def state_create_user(self):
        """Create the user and exit with changed=True."""
        account_spec = self.create_account_spec()
        try:
            # The previously unused 'task = ...' binding has been dropped.
            self.content.accountManager.CreateUser(account_spec)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_update_user(self):
        """Update the existing user's info and exit with changed=True."""
        account_spec = self.create_account_spec()
        try:
            self.content.accountManager.UpdateUser(account_spec)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_remove_user(self):
        """Remove the user and exit with changed=True."""
        try:
            self.content.accountManager.RemoveUser(self.local_user_name)
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_exit_unchanged(self):
        """Desired state already satisfied: report no change."""
        self.module.exit_json(changed=False)
def main():
    """Module entry point: build the argument spec, validate the pyVmomi
    dependency and run the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(local_user_name=dict(required=True, type='str'),
                              local_user_password=dict(required=False, type='str', no_log=True),
                              local_user_description=dict(required=False, type='str'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    # Fail early if the required pyVmomi library is missing.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    vmware_local_user_manager = VMwareLocalUserManager(module)
    vmware_local_user_manager.process_state()
# Legacy Ansible module convention: utility wildcard imports live at the
# bottom of the file, after the definitions that use them.
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
SOKP/external_chromium_org | tools/deep_memory_profiler/subcommands/expand.py | 26 | 5501 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
from lib.policy import PolicySet
from lib.subcommand import SubCommand
LOGGER = logging.getLogger('dmprof')
class ExpandCommand(SubCommand):
  """Subcommand 'expand': lists the stacktraces (up to a given depth) that
  make up one policy component of a dump, with their committed sizes."""

  def __init__(self):
    super(ExpandCommand, self).__init__(
        'Usage: %prog expand <dump> <policy> <component> <depth>')
    self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
                            metavar='/path/on/target@/path/on/host[:...]',
                            help='Read files in /path/on/host/ instead of '
                                 'files in /path/on/target/.')

  def do(self, sys_argv):
    """Parses arguments, loads dump/policy and prints the expansion.

    Returns 0 on success, 1 if the component is not in the policy.
    """
    options, args = self._parse_args(sys_argv, 4)
    dump_path = args[1]
    target_policy = args[2]
    component_name = args[3]
    depth = args[4]
    alternative_dirs_dict = {}
    policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))
    # Validate the component before the (expensive) dump load below.
    if not policy_set[target_policy].find_rule(component_name):
      sys.stderr.write("ERROR: Component %s not found in policy %s\n"
                       % (component_name, target_policy))
      return 1
    if options.alternative_dirs:
      for alternative_dir_pair in options.alternative_dirs.split(':'):
        target_path, host_path = alternative_dir_pair.split('@', 1)
        alternative_dirs_dict[target_path] = host_path
    (bucket_set, dump) = SubCommand.load_basic_files(
        dump_path, False, alternative_dirs=alternative_dirs_dict)
    ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
                          component_name, int(depth), sys.stdout)
    return 0

  @staticmethod
  def _output(dump, policy, bucket_set, component_name, depth, out):
    """Prints all stacktraces in a given component of given depth.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.
        depth: An integer representing depth to be printed.
        out: An IO object to output.
    """
    sizes = {}
    ExpandCommand._accumulate(
        dump, policy, bucket_set, component_name, depth, sizes)
    sorted_sizes_list = sorted(
        sizes.iteritems(), key=(lambda x: x[1]), reverse=True)
    total = 0
    # TODO(dmikurube): Better formatting.
    for size_pair in sorted_sizes_list:
      out.write('%10d %s\n' % (size_pair[1], size_pair[0]))
      total += size_pair[1]
    LOGGER.info('total: %d\n' % total)

  @staticmethod
  def _add_size(precedence, bucket, depth, committed, sizes):
    """Accumulates 'committed' bytes into 'sizes', keyed by the bucket's
    stacktrace truncated to depth+1 frames and prefixed by 'precedence'."""
    stacktrace_sequence = precedence
    for function, sourcefile in zip(
        bucket.symbolized_stackfunction[
            0 : min(len(bucket.symbolized_stackfunction), 1 + depth)],
        bucket.symbolized_stacksourcefile[
            0 : min(len(bucket.symbolized_stacksourcefile), 1 + depth)]):
      stacktrace_sequence += '%s(@%s) ' % (function, sourcefile)
    # 'not in' instead of 'not ... in' (idiomatic membership test).
    if stacktrace_sequence not in sizes:
      sizes[stacktrace_sequence] = 0
    sizes[stacktrace_sequence] += committed

  @staticmethod
  def _accumulate(dump, policy, bucket_set, component_name, depth, sizes):
    """Walks the dump and adds every allocation that the policy assigns
    to 'component_name' into 'sizes', dispatching on the rule's
    allocator type (malloc / mmap / unhooked)."""
    rule = policy.find_rule(component_name)
    if not rule:
      pass
    elif rule.allocator_type == 'malloc':
      for bucket_id, _, committed, allocs, frees in dump.iter_stacktrace:
        bucket = bucket_set.get(bucket_id)
        if not bucket or bucket.allocator_type == 'malloc':
          component_match = policy.find_malloc(bucket)
        elif bucket.allocator_type == 'mmap':
          continue
        else:
          assert False
        if component_match == component_name:
          precedence = ''
          precedence += '(alloc=%d) ' % allocs
          precedence += '(free=%d) ' % frees
          if bucket.typeinfo:
            precedence += '(type=%s) ' % bucket.symbolized_typeinfo
            precedence += '(type.name=%s) ' % bucket.typeinfo_name
          ExpandCommand._add_size(precedence, bucket, depth, committed, sizes)
    elif rule.allocator_type == 'mmap':
      for _, region in dump.iter_map:
        if region[0] != 'hooked':
          continue
        component_match, bucket = policy.find_mmap(region, bucket_set)
        if component_match == component_name:
          ExpandCommand._add_size('', bucket, depth,
                                  region[1]['committed'], sizes)
    elif rule.allocator_type == 'unhooked':
      for addr, region in dump.iter_map:
        if region[0] != 'unhooked':
          continue
        component_match = policy.find_unhooked(region)
        if component_match == component_name:
          # The precedence key encodes the VMA like /proc/pid/maps:
          # address range, permissions, offset, device, inode and name.
          precedence = ''
          precedence += '%s-' % hex(addr[0])[2:]
          precedence += '%s' % hex(addr[1])[2:]
          precedence += ' %s' % region[1]['vma']['readable']
          precedence += '%s' % region[1]['vma']['writable']
          precedence += '%s' % region[1]['vma']['executable']
          precedence += '%s' % region[1]['vma']['private']
          precedence += ' %s' % region[1]['vma']['offset']
          precedence += ' %s:' % region[1]['vma']['major']
          precedence += '%s' % region[1]['vma']['minor']
          precedence += ' %s' % region[1]['vma']['inode']
          precedence += ' %s' % region[1]['vma']['name']
          if precedence not in sizes:
            sizes[precedence] = 0
          # Stray '| bsd-3-clause' text fused onto this line (a dump
          # artifact that broke the syntax) has been removed.
          sizes[precedence] += region[1]['committed']
rcbops/quantum-buildpackage | quantum/plugins/cisco/common/cisco_constants.py | 3 | 4307 | """
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
#
"""
# Configuration section / plugin-framework keys.
PLUGINS = 'PLUGINS'
INVENTORY = 'INVENTORY'
# Port state keys and values.
PORT_STATE = 'port-state'
PORT_UP = "ACTIVE"
PORT_DOWN = "DOWN"
# Model attribute names (database column / dict keys).
UUID = 'uuid'
TENANTID = 'tenant_id'
NETWORKID = 'network_id'
NETWORKNAME = 'name'
NETWORKPORTS = 'ports'
INTERFACEID = 'interface_id'
PORTSTATE = 'state'
PORTID = 'port_id'
PPNAME = 'name'
PPVLANID = 'vlan_id'
PPQOS = 'qos'
PPID = 'portprofile_id'
PPDEFAULT = 'default'
VLANID = 'vlan_id'
VLANNAME = 'vlan_name'
PORTPROFILENAME = 'portprofile_name'
QOS = 'qos'
ATTACHMENT = 'attachment'
# API/response attribute names (dash-separated wire format).
PORT_ID = 'port-id'
NET_ID = 'net-id'
NET_NAME = 'net-name'
NET_PORTS = 'net-ports'
NET_VLAN_NAME = 'net-vlan-name'
NET_VLAN_ID = 'net-vlan-id'
NET_TENANTS = 'net-tenants'
TENANT_ID = 'tenant-id'
TENANT_NETWORKS = 'tenant-networks'
TENANT_NAME = 'tenant-name'
TENANT_PORTPROFILES = 'tenant-portprofiles'
TENANT_QOS_LEVELS = 'tenant-qos-levels'
TENANT_CREDENTIALS = 'tenant-credentials'
PORT_PROFILE = 'port-profile'
PROFILE_ID = 'profile_id'
PROFILE_NAME = 'profile_name'
PROFILE_VLAN_NAME = 'profile-vlan-name'
PROFILE_VLAN_ID = 'vlan-id'
PROFILE_QOS = 'qos_name'
PROFILE_ASSOCIATIONS = 'assignment'
# QoS level and credential attribute names.
QOS_LEVEL_ID = 'qos_id'
QOS_LEVEL_NAME = 'qos_name'
QOS_LEVEL_ASSOCIATIONS = 'qos-level-associations'
QOS_LEVEL_DESCRIPTION = 'qos_desc'
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
# UCS blade interface attribute names and states.
BLADE_INTF_DN = "blade_intf_distinguished_name"
BLADE_INTF_ORDER = "blade-intf-order"
BLADE_INTF_LINK_STATE = "blade-intf-link-state"
BLADE_INTF_OPER_STATE = "blade-intf-operational-state"
BLADE_INTF_INST_TYPE = "blade-intf-inst-type"
BLADE_INTF_RHEL_DEVICE_NAME = "blade-intf-rhel-device-name"
BLADE_INTF_DYNAMIC = "dynamic"
BLADE_INTF_STATE_UNKNOWN = "unknown"
BLADE_INTF_STATE_UNALLOCATED = "unallocated"
BLADE_INTF_RESERVED = "blade-intf-reserved"
BLADE_INTF_UNRESERVED = "blade-intf-unreserved"
BLADE_INTF_RESERVATION = "blade-intf-reservation-status"
BLADE_UNRESERVED_INTF_COUNT = "blade-unreserved-interfaces-count"
BLADE_INTF_DATA = "blade-intf-data"
# "Least reserved blade" lookup keys.
LEAST_RSVD_BLADE_UCSM = "least-reserved-blade-ucsm"
LEAST_RSVD_BLADE_CHASSIS = "least-reserved-blade-chassis"
LEAST_RSVD_BLADE_ID = "least-reserved-blade-id"
LEAST_RSVD_BLADE_DATA = "least-reserved-blade-data"
RESERVED_NIC_HOSTNAME = "reserved-dynamic-nic-hostname"
RESERVED_NIC_NAME = "reserved-dynamic-nic-device-name"
RESERVED_INTERFACE_UCSM = "reserved-interface-ucsm-ip"
RESERVED_INTERFACE_CHASSIS = "reserved-interface-chassis"
RESERVED_INTERFACE_BLADE = "reserved-interface-blade"
RESERVED_INTERFACE_DN = "reserved-interface-dn"
# NOTE(review): "REPFIX" looks like a typo for "PREFIX", but the name is
# kept as-is for backward compatibility with existing callers.
RHEL_DEVICE_NAME_REPFIX = "eth"
# Plugin and inventory registry keys.
UCS_PLUGIN = 'ucs_plugin'
NEXUS_PLUGIN = 'nexus_plugin'
UCS_INVENTORY = 'ucs_inventory'
NEXUS_INVENTORY = 'nexus_inventory'
PLUGIN_OBJ_REF = 'plugin-obj-ref'
PARAM_LIST = 'param-list'
DEVICE_IP = 'device_ip'
NO_VLAN_ID = 0
HOST_LIST = 'host_list'
HOST_1 = 'host_1'
VIF_DESC = 'vif_desc'
DEVICENAME = 'device'
UCSPROFILE = 'portprofile'
IP_ADDRESS = 'ip_address'
CHASSIS_ID = 'chassis_id'
BLADE_ID = 'blade_id'
HOST_NAME = 'host_name'
INSTANCE_ID = 'instance_id'
VIF_ID = 'vif_id'
PROJECT_ID = 'project_id'
# (A duplicate ``UCS_INVENTORY = 'ucs_inventory'`` assignment that used to
# appear here was removed; the value is unchanged.)
LEAST_RSVD_BLADE_DICT = 'least_rsvd_blade_dict'
UCSM_IP = 'ucsm_ip_address'
NETWORK_ADMIN = 'network_admin'
NETID_LIST = 'net_id_list'
# Regex character class used to split lists.  NOTE(review): inside a
# character class ``\b`` matches a backspace character, not a word
# boundary -- presumably intentional here; confirm against callers.
DELIMITERS = "[,;:\b\s]"
UUID_LENGTH = 36
UNPLUGGED = '(detached)'
ASSOCIATION_STATUS = 'association_status'
ATTACHED = 'attached'
DETACHED = 'detached'
| apache-2.0 |
koenbok/Cactus | cactus/bootstrap/archive.py | 5 | 1280 | #coding:utf-8
import os
import shutil
import tarfile
import zipfile
from six.moves import urllib
class Folder(object):
    """Adapter that lets a plain directory be used through the same
    ``extractall``/``close`` interface as zip/tar archive objects."""
    def __init__(self, from_path):
        # Directory whose contents will be copied into the target path.
        self.from_path = from_path
    def extractall(self, path):
        # ``path`` is expected to be a freshly created, empty directory;
        # it is removed first because shutil.copytree requires that the
        # destination does not yet exist.
        os.rmdir(path)
        shutil.copytree(self.from_path, path)
    def close(self):
        # Nothing to release for a plain directory.
        pass
def open_zipfile(archive):
    """Return a ``zipfile.ZipFile`` wrapping the archive at *archive*."""
    opened = zipfile.ZipFile(archive)
    return opened
def open_tarfile(archive):
    """Return a ``tarfile.TarFile`` opened read-only on *archive*."""
    return tarfile.open(archive, mode='r')
# (opener, test) pairs tried in order by bootstrap_from_archive: ``test``
# checks whether a path is of that archive kind, and ``opener`` returns an
# object exposing extractall()/close().
SUPPORTED_ARCHIVES = [
    (open_tarfile, tarfile.is_tarfile),
    (open_zipfile, zipfile.is_zipfile),
    (Folder, os.path.isdir),
]
def bootstrap_from_archive(path, skeleton):
    """Create a new project directory at ``path`` from ``skeleton``.

    ``skeleton`` may be a local archive file, a local directory, or a URL
    (which is downloaded first).  Raises Exception if the skeleton is not
    one of the supported kinds (tar, zip, or plain directory).
    """
    if os.path.isfile(skeleton) or os.path.isdir(skeleton):
        # Is it a local file or directory?
        skeleton_file = skeleton
    else:
        # Assume it's an URL
        skeleton_file, headers = urllib.request.urlretrieve(skeleton)
    for opener, test in SUPPORTED_ARCHIVES:
        try:
            if test(skeleton_file):
                archive = opener(skeleton_file)
                break
        except IOError:
            # A failed probe just means "not this format"; try the next one.
            pass
    else:
        raise Exception("Unsupported skeleton file type. Only .tar and .zip are supported at this time.")
    os.mkdir(path)
    archive.extractall(path=path)
    archive.close()
| bsd-3-clause |
jspargo/AneMo | django/lib/python2.7/site-packages/django/contrib/gis/tests/relatedapp/tests.py | 43 | 15237 | from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
    """Tests for GeoDjango behavior across model relations: select_related
    over geometry FKs, transform/extent/union/collect on related fields,
    F() expressions on GeometryFields, values()/defer()/only(), and a
    number of ticket-numbered regression tests.
    """
    def test02_select_related(self):
        "Testing `select_related` on geographic models (see #7126)."
        qs1 = City.objects.all()
        qs2 = City.objects.select_related()
        qs3 = City.objects.select_related('location')
        # Reference data for what's in the fixtures.
        cities = (
            ('Aurora', 'TX', -97.516111, 33.058333),
            ('Roswell', 'NM', -104.528056, 33.387222),
            ('Kecksburg', 'PA', -79.460734, 40.18476),
        )
        for qs in (qs1, qs2, qs3):
            for ref, c in zip(cities, qs):
                nm, st, lon, lat = ref
                self.assertEqual(nm, c.name)
                self.assertEqual(st, c.state)
                self.assertEqual(Point(lon, lat), c.location.point)
    @no_mysql
    def test03_transform_related(self):
        "Testing the `transform` GeoQuerySet method on related geographic models."
        # All the transformations are to state plane coordinate systems using
        # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
        tol = 0
        def check_pnt(ref, pnt):
            self.assertAlmostEqual(ref.x, pnt.x, tol)
            self.assertAlmostEqual(ref.y, pnt.y, tol)
            self.assertEqual(ref.srid, pnt.srid)
        # Each city transformed to the SRID of their state plane coordinate system.
        transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
                       ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
                       ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
                       )
        for name, srid, wkt in transformed:
            # Doing this implicitly sets `select_related` select the location.
            # TODO: Fix why this breaks on Oracle.
            qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
            check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
    @no_mysql
    @no_spatialite
    def test04a_related_extent_aggregate(self):
        "Testing the `extent` GeoQuerySet aggregates on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Extent('location__point'))
        # One for all locations, one that excludes New Mexico (Roswell).
        all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
        txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
        e1 = City.objects.extent(field_name='location__point')
        e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
        e3 = aggs['location__point__extent']
        # The tolerance value is to four decimal places because of differences
        # between the Oracle and PostGIS spatial backends on the extent calculation.
        tol = 4
        for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
            for ref_val, e_val in zip(ref, e):
                self.assertAlmostEqual(ref_val, e_val, tol)
    @no_mysql
    def test04b_related_union_aggregate(self):
        "Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
        # This combines the Extent and Union aggregates into one query
        aggs = City.objects.aggregate(Union('location__point'))
        # These are the points that are components of the aggregate geographic
        # union that is returned.  Each point # corresponds to City PK.
        p1 = Point(-104.528056, 33.387222)
        p2 = Point(-97.516111, 33.058333)
        p3 = Point(-79.460734, 40.18476)
        p4 = Point(-96.801611, 32.782057)
        p5 = Point(-95.363151, 29.763374)
        # Creating the reference union geometry depending on the spatial backend,
        # as Oracle will have a different internal ordering of the component
        # geometries than PostGIS.  The second union aggregate is for a union
        # query that includes limiting information in the WHERE clause (in other
        # words a `.filter()` precedes the call to `.unionagg()`).
        if oracle:
            ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
            ref_u2 = MultiPoint(p3, p2, srid=4326)
        else:
            # Looks like PostGIS points by longitude value.
            ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
            ref_u2 = MultiPoint(p2, p3, srid=4326)
        u1 = City.objects.unionagg(field_name='location__point')
        u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
        u3 = aggs['location__point__union']
        self.assertEqual(ref_u1, u1)
        self.assertEqual(ref_u2, u2)
        self.assertEqual(ref_u1, u3)
    def test05_select_related_fk_to_subclass(self):
        "Testing that calling select_related on a query over a model with an FK to a model subclass works"
        # Regression test for #9752.
        list(DirectoryEntry.objects.all().select_related())
    def test06_f_expressions(self):
        "Testing F() expressions on GeometryFields."
        # Constructing a dummy parcel border and getting the City instance for
        # assigning the FK.
        b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
        pcity = City.objects.get(name='Aurora')
        # First parcel has incorrect center point that is equal to the City;
        # it also has a second border that is different from the first as a
        # 100ft buffer around the City.
        c1 = pcity.location.point
        c2 = c1.transform(2276, clone=True)
        b2 = c2.buffer(100)
        Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
        # Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems.  The center points are also the
        # same (but in different coordinate systems), and this time they
        # actually correspond to the centroid of the border.
        c1 = b1.centroid
        c2 = c1.transform(2276, clone=True)
        Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
        # Should return the second Parcel, which has the center within the
        # border.
        qs = Parcel.objects.filter(center1__within=F('border1'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P2', qs[0].name)
        if not mysql:
            # This time center2 is in a different coordinate system and needs
            # to be wrapped in transformation SQL.
            qs = Parcel.objects.filter(center2__within=F('border1'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P2', qs[0].name)
        # Should return the first Parcel, which has the center point equal
        # to the point in the City ForeignKey.
        qs = Parcel.objects.filter(center1=F('city__location__point'))
        self.assertEqual(1, len(qs))
        self.assertEqual('P1', qs[0].name)
        if not mysql:
            # This time the city column should be wrapped in transformation SQL.
            qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
            self.assertEqual(1, len(qs))
            self.assertEqual('P1', qs[0].name)
    def test07_values(self):
        "Testing values() and values_list() and GeoQuerySets."
        # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
        gqs = Location.objects.all()
        gvqs = Location.objects.values()
        gvlqs = Location.objects.values_list()
        # Incrementing through each of the models, dictionaries, and tuples
        # returned by the different types of GeoQuerySets.
        for m, d, t in zip(gqs, gvqs, gvlqs):
            # The values should be Geometry objects and not raw strings returned
            # by the spatial database.
            self.assertTrue(isinstance(d['point'], Geometry))
            self.assertTrue(isinstance(t[1], Geometry))
            self.assertEqual(m.point, d['point'])
            self.assertEqual(m.point, t[1])
    def test08_defer_only(self):
        "Testing defer() and only() on Geographic models."
        qs = Location.objects.all()
        def_qs = Location.objects.defer('point')
        for loc, def_loc in zip(qs, def_qs):
            self.assertEqual(loc.point, def_loc.point)
    def test09_pk_relations(self):
        "Ensuring correct primary key column is selected across relations. See #10757."
        # The expected ID values -- notice the last two location IDs
        # are out of order.  Dallas and Houston have location IDs that differ
        # from their PKs -- this is done to ensure that the related location
        # ID column is selected instead of ID column for the city.
        city_ids = (1, 2, 3, 4, 5)
        loc_ids = (1, 2, 3, 5, 4)
        ids_qs = City.objects.order_by('id').values('id', 'location__id')
        for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
            self.assertEqual(val_dict['id'], c_id)
            self.assertEqual(val_dict['location__id'], l_id)
    # TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
    @no_oracle
    def test10_combine(self):
        "Testing the combination of two GeoQuerySets.  See #10807."
        buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
        buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
        qs1 = City.objects.filter(location__point__within=buf1)
        qs2 = City.objects.filter(location__point__within=buf2)
        combined = qs1 | qs2
        names = [c.name for c in combined]
        self.assertEqual(2, len(names))
        self.assertTrue('Aurora' in names)
        self.assertTrue('Kecksburg' in names)
    def test11_geoquery_pickle(self):
        "Ensuring GeoQuery objects are unpickled correctly.  See #10839."
        import pickle
        from django.contrib.gis.db.models.sql import GeoQuery
        qs = City.objects.all()
        q_str = pickle.dumps(qs.query)
        q = pickle.loads(q_str)
        self.assertEqual(GeoQuery, q.__class__)
    # TODO: fix on Oracle -- get the following error because the SQL is ordered
    # by a geometry object, which Oracle apparently doesn't like:
    #  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
    @no_oracle
    def test12a_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on geo-fields."
        # The City, 'Fort Worth' uses the same location as Dallas.
        dallas = City.objects.get(name='Dallas')
        # Count annotation should be 2 for the Dallas location now.
        loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
        self.assertEqual(2, loc.num_cities)
    def test12b_count(self):
        "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
        # Should only be one author (Trevor Paglen) returned by this query, and
        # the annotation should have 3 for the number of books, see #11087.
        # Also testing with a `GeoValuesQuerySet`, see #11489.
        qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
        vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
        self.assertEqual(1, len(qs))
        self.assertEqual(3, qs[0].num_books)
        self.assertEqual(1, len(vqs))
        self.assertEqual(3, vqs[0]['num_books'])
    # TODO: fix on Oracle -- get the following error because the SQL is ordered
    # by a geometry object, which Oracle apparently doesn't like:
    #  ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
    @no_oracle
    def test13c_count(self):
        "Testing `Count` aggregate with `.values()`.  See #15305."
        qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
        self.assertEqual(1, len(qs))
        self.assertEqual(2, qs[0]['num_cities'])
        self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
    # TODO: The phantom model does appear on Oracle.
    @no_oracle
    def test13_select_related_null_fk(self):
        "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
        Book.objects.create(title='Without Author')
        b = Book.objects.select_related('author').get(title='Without Author')
        # Should be `None`, and not a 'dummy' model.
        self.assertEqual(None, b.author)
    @no_mysql
    @no_oracle
    @no_spatialite
    def test14_collect(self):
        "Testing the `collect` GeoQuerySet method and `Collect` aggregate."
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
        #    "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
        #    WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
        c1 = City.objects.filter(state='TX').collect(field_name='location__point')
        c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
        for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share same point, Collect doesn't
            # consolidate -- that's why 4 points in MultiPoint.
            self.assertEqual(4, len(coll))
            self.assertEqual(ref_geom, coll)
    def test15_invalid_select_related(self):
        "Testing doing select_related on the related name manager of a unique FK. See #13934."
        qs = Article.objects.select_related('author__article')
        # This triggers TypeError when `get_default_columns` has no `local_only`
        # keyword.  The TypeError is swallowed if QuerySet is actually
        # evaluated as list generation swallows TypeError in CPython.
        str(qs.query)
    def test16_annotated_date_queryset(self):
        "Ensure annotated date querysets work if spatial backend is used.  See #14648."
        birth_years = [dt.year for dt in
                       list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
        birth_years.sort()
        self.assertEqual([1950, 1974], birth_years)
    # TODO: Related tests for KML, GML, and distance lookups.
| gpl-2.0 |
foss-transportationmodeling/rettina-server | .env/lib/python2.7/site-packages/migrate/changeset/databases/oracle.py | 140 | 3655 | """
Oracle database specific implementations of changeset classes.
"""
import sqlalchemy as sa
from sqlalchemy.databases import oracle as sa_base
from migrate import exceptions
from migrate.changeset import ansisql
OracleSchemaGenerator = sa_base.OracleDDLCompiler
class OracleColumnGenerator(OracleSchemaGenerator, ansisql.ANSIColumnGenerator):
pass
class OracleColumnDropper(ansisql.ANSIColumnDropper):
pass
class OracleSchemaChanger(OracleSchemaGenerator, ansisql.ANSISchemaChanger):
def get_column_specification(self, column, **kwargs):
# Ignore the NOT NULL generated
override_nullable = kwargs.pop('override_nullable', None)
if override_nullable:
orig = column.nullable
column.nullable = True
ret = super(OracleSchemaChanger, self).get_column_specification(
column, **kwargs)
if override_nullable:
column.nullable = orig
return ret
def visit_column(self, delta):
keys = delta.keys()
if 'name' in keys:
self._run_subvisit(delta,
self._visit_column_name,
start_alter=False)
if len(set(('type', 'nullable', 'server_default')).intersection(keys)):
self._run_subvisit(delta,
self._visit_column_change,
start_alter=False)
def _visit_column_change(self, table, column, delta):
# Oracle cannot drop a default once created, but it can set it
# to null. We'll do that if default=None
# http://forums.oracle.com/forums/message.jspa?messageID=1273234#1273234
dropdefault_hack = (column.server_default is None \
and 'server_default' in delta.keys())
# Oracle apparently doesn't like it when we say "not null" if
# the column's already not null. Fudge it, so we don't need a
# new function
notnull_hack = ((not column.nullable) \
and ('nullable' not in delta.keys()))
# We need to specify NULL if we're removing a NOT NULL
# constraint
null_hack = (column.nullable and ('nullable' in delta.keys()))
if dropdefault_hack:
column.server_default = sa.PassiveDefault(sa.sql.null())
if notnull_hack:
column.nullable = True
colspec = self.get_column_specification(column,
override_nullable=null_hack)
if null_hack:
colspec += ' NULL'
if notnull_hack:
column.nullable = False
if dropdefault_hack:
column.server_default = None
self.start_alter_table(table)
self.append("MODIFY (")
self.append(colspec)
self.append(")")
class OracleConstraintCommon(object):
    """Shared behavior for Oracle constraint generator/dropper mixins."""
    def get_constraint_name(self, cons):
        # Oracle constraints can't guess their name like other DBs
        if not cons.name:
            raise exceptions.NotSupportedError(
                "Oracle constraint names must be explicitly stated")
        return cons.name
class OracleConstraintGenerator(OracleConstraintCommon,
                                ansisql.ANSIConstraintGenerator):
    """ANSI constraint generation plus Oracle's explicit-name requirement."""
    pass
class OracleConstraintDropper(OracleConstraintCommon,
                              ansisql.ANSIConstraintDropper):
    """ANSI constraint dropping plus Oracle's explicit-name requirement."""
    pass
class OracleDialect(ansisql.ANSIDialect):
    """Wires the Oracle-specific changeset visitors into one dialect."""
    columngenerator = OracleColumnGenerator
    columndropper = OracleColumnDropper
    schemachanger = OracleSchemaChanger
    constraintgenerator = OracleConstraintGenerator
    constraintdropper = OracleConstraintDropper
| apache-2.0 |
DirtyPiece/dancestudio | Build/Tools/Python27/Lib/xml/dom/domreg.py | 238 | 3478 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations.  Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
    'minidom':'xml.dom.minidom',
    '4DOM': 'xml.dom.DOMImplementation',
    }
# DOM implementations not officially registered should register
# themselves with their name via registerDOMImplementation() below.
registered = {}
def registerDOMImplementation(name, factory):
    """registerDOMImplementation(name, factory)

    Register the factory function with the name. The factory function
    should return an object which implements the DOMImplementation
    interface. The factory function can either return the same object,
    or a new one (e.g. if that implementation supports some
    customization)."""
    # Stored in the module-level registry consulted by getDOMImplementation.
    registered[name] = factory
def _good_enough(dom, features):
    "_good_enough(dom, features) -> Return 1 if the dom offers the features"
    # Any single missing (feature, version) pair disqualifies the DOM;
    # the generator short-circuits just like the original loop did.
    if all(dom.hasFeature(feature, version) for feature, version in features):
        return 1
    return 0
def getDOMImplementation(name = None, features = ()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.

    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.

    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature."""
    # NOTE: this module uses Python 2 syntax (``raise ImportError, ...``,
    # ``StandardError``); keep edits Python 2 compatible.
    import os
    creator = None
    mod = well_known_implementations.get(name)
    if mod:
        # Well-known name: import its module and delegate.
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        # Explicitly registered name: call its factory.
        return registered[name]()
    elif "PYTHON_DOM" in os.environ:
        # Environment override for the default implementation.
        return getDOMImplementation(name = os.environ["PYTHON_DOM"])
    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, StringTypes):
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom
    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name = creator)
        except StandardError: # typically ImportError, or AttributeError
            continue
        if _good_enough(dom, features):
            return dom
    raise ImportError,"no suitable DOM implementation found"
def _parse_feature_string(s):
    """Parse a whitespace-separated feature string into a tuple of
    (feature, version) pairs.

    A token starting with a digit is treated as the version of the
    preceding feature; a leading digit where a feature name is expected
    raises ValueError.  (Python 2 ``raise`` syntax below.)
    """
    features = []
    parts = s.split()
    i = 0
    length = len(parts)
    while i < length:
        feature = parts[i]
        if feature[0] in "0123456789":
            raise ValueError, "bad feature name: %r" % (feature,)
        i = i + 1
        version = None
        if i < length:
            v = parts[i]
            if v[0] in "0123456789":
                # Next token is a version number for this feature.
                i = i + 1
                version = v
        features.append((feature, version))
    return tuple(features)
| mit |
plotly/python-api | packages/python/plotly/plotly/graph_objs/histogram2d/_stream.py | 2 | 4158 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Stream options for `histogram2d` traces (generated-style plotly
    hierarchy node exposing the `maxpoints` and `token` properties)."""

    # class properties
    # --------------------
    _parent_path_str = "histogram2d"
    _path_str = "histogram2d.stream"
    _valid_props = {"maxpoints", "token"}

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.histogram2d.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram2d.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Stream`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit |
cpennington/edx-platform | openedx/core/djangoapps/bookmarks/tests/test_models.py | 4 | 20788 | """
Tests for Bookmarks models.
"""
import datetime
from contextlib import contextmanager
import ddt
import mock
import pytz
from freezegun import freeze_time
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from six import text_type
from six.moves import range
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import AdminFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from .. import DEFAULT_FIELDS, OPTIONAL_FIELDS, PathItem
from ..models import Bookmark, XBlockCache, parse_path_data
from .factories import BookmarkFactory
# Sample old-style (i4x) usage keys used as stand-in block locators in tests.
EXAMPLE_USAGE_KEY_1 = u'i4x://org.15/course_15/chapter/Week_1'
EXAMPLE_USAGE_KEY_2 = u'i4x://org.15/course_15/chapter/Week_2'
# ``yield`` inside a lambda makes it a generator function on the Python
# versions this file targets, so this is a no-op context manager that
# takes (and ignores) one argument.
noop_contextmanager = contextmanager(lambda x: (yield))  # pylint: disable=invalid-name
class BookmarksTestsBase(ModuleStoreTestCase):
"""
Test the Bookmark model.
"""
ALL_FIELDS = DEFAULT_FIELDS + OPTIONAL_FIELDS
STORE_TYPE = ModuleStoreEnum.Type.mongo
TEST_PASSWORD = 'test'
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
    def setUp(self):
        """Create an admin and two regular users, then build course data."""
        super(BookmarksTestsBase, self).setUp()
        self.admin = AdminFactory()
        self.user = UserFactory.create(password=self.TEST_PASSWORD)
        self.other_user = UserFactory.create(password=self.TEST_PASSWORD)
        self.setup_data(self.STORE_TYPE)
def setup_data(self, store_type=ModuleStoreEnum.Type.mongo):
""" Create courses and add some test blocks. """
with self.store.default_store(store_type):
self.course = CourseFactory.create(display_name='An Introduction to API Testing')
self.course_id = text_type(self.course.id)
with self.store.bulk_operations(self.course.id):
self.chapter_1 = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name='Week 1'
)
self.chapter_2 = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name='Week 2'
)
self.sequential_1 = ItemFactory.create(
parent_location=self.chapter_1.location, category='sequential', display_name='Lesson 1'
)
self.sequential_2 = ItemFactory.create(
parent_location=self.chapter_1.location, category='sequential', display_name='Lesson 2'
)
self.vertical_1 = ItemFactory.create(
parent_location=self.sequential_1.location, category='vertical', display_name='Subsection 1'
)
self.vertical_2 = ItemFactory.create(
parent_location=self.sequential_2.location, category='vertical', display_name='Subsection 2'
)
self.vertical_3 = ItemFactory.create(
parent_location=self.sequential_2.location, category='vertical', display_name='Subsection 3'
)
self.html_1 = ItemFactory.create(
parent_location=self.vertical_2.location, category='html', display_name='Details 1'
)
self.path = [
PathItem(self.chapter_1.location, self.chapter_1.display_name),
PathItem(self.sequential_2.location, self.sequential_2.display_name),
]
self.bookmark_1 = BookmarkFactory.create(
user=self.user,
course_key=self.course_id,
usage_key=self.sequential_1.location,
xblock_cache=XBlockCache.create({
'display_name': self.sequential_1.display_name,
'usage_key': self.sequential_1.location,
}),
)
self.bookmark_2 = BookmarkFactory.create(
user=self.user,
course_key=self.course_id,
usage_key=self.sequential_2.location,
xblock_cache=XBlockCache.create({
'display_name': self.sequential_2.display_name,
'usage_key': self.sequential_2.location,
}),
)
self.other_course = CourseFactory.create(display_name='An Introduction to API Testing 2')
with self.store.bulk_operations(self.other_course.id):
self.other_chapter_1 = ItemFactory.create(
parent_location=self.other_course.location, category='chapter', display_name='Other Week 1'
)
self.other_sequential_1 = ItemFactory.create(
parent_location=self.other_chapter_1.location, category='sequential', display_name='Other Lesson 1'
)
self.other_sequential_2 = ItemFactory.create(
parent_location=self.other_chapter_1.location, category='sequential', display_name='Other Lesson 2'
)
self.other_vertical_1 = ItemFactory.create(
parent_location=self.other_sequential_1.location, category='vertical', display_name='Other Subsection 1'
)
self.other_vertical_2 = ItemFactory.create(
parent_location=self.other_sequential_1.location, category='vertical', display_name='Other Subsection 2'
)
# self.other_vertical_1 has two parents
self.other_sequential_2.children.append(self.other_vertical_1.location)
modulestore().update_item(self.other_sequential_2, self.admin.id)
self.other_bookmark_1 = BookmarkFactory.create(
user=self.user,
course_key=text_type(self.other_course.id),
usage_key=self.other_vertical_1.location,
xblock_cache=XBlockCache.create({
'display_name': self.other_vertical_1.display_name,
'usage_key': self.other_vertical_1.location,
}),
)
def create_course_with_blocks(self, children_per_block=1, depth=1, store_type=ModuleStoreEnum.Type.mongo):
"""
Create a course and add blocks.
"""
with self.store.default_store(store_type):
course = CourseFactory.create()
display_name = 0
with self.store.bulk_operations(course.id):
blocks_at_next_level = [course]
for __ in range(depth):
blocks_at_current_level = blocks_at_next_level
blocks_at_next_level = []
for block in blocks_at_current_level:
for __ in range(children_per_block):
blocks_at_next_level += [ItemFactory.create(
parent_location=block.scope_ids.usage_id, display_name=text_type(display_name)
)]
display_name += 1
return course
def create_course_with_bookmarks_count(self, count, store_type=ModuleStoreEnum.Type.mongo):
"""
Create a course, add some content and add bookmarks.
"""
with self.store.default_store(store_type):
course = CourseFactory.create()
with self.store.bulk_operations(course.id):
blocks = [ItemFactory.create(
parent_location=course.location, category='chapter', display_name=text_type(index)
) for index in range(count)]
bookmarks = [BookmarkFactory.create(
user=self.user,
course_key=course.id,
usage_key=block.location,
xblock_cache=XBlockCache.create({
'display_name': block.display_name,
'usage_key': block.location,
}),
) for block in blocks]
return course, blocks, bookmarks
def assert_bookmark_model_is_valid(self, bookmark, bookmark_data):
"""
Assert that the attributes of the bookmark model were set correctly.
"""
self.assertEqual(bookmark.user, bookmark_data['user'])
self.assertEqual(bookmark.course_key, bookmark_data['course_key'])
self.assertEqual(text_type(bookmark.usage_key), text_type(bookmark_data['usage_key']))
self.assertEqual(bookmark.resource_id, u"{},{}".format(bookmark_data['user'], bookmark_data['usage_key']))
self.assertEqual(bookmark.display_name, bookmark_data['display_name'])
self.assertEqual(bookmark.path, self.path)
self.assertIsNotNone(bookmark.created)
self.assertEqual(bookmark.xblock_cache.course_key, bookmark_data['course_key'])
self.assertEqual(bookmark.xblock_cache.display_name, bookmark_data['display_name'])
def assert_bookmark_data_is_valid(self, bookmark, bookmark_data, check_optional_fields=False):
"""
Assert that the bookmark data matches the data in the model.
"""
self.assertEqual(bookmark_data['id'], bookmark.resource_id)
self.assertEqual(bookmark_data['course_id'], text_type(bookmark.course_key))
self.assertEqual(bookmark_data['usage_id'], text_type(bookmark.usage_key))
self.assertEqual(bookmark_data['block_type'], text_type(bookmark.usage_key.block_type))
self.assertIsNotNone(bookmark_data['created'])
if check_optional_fields:
self.assertEqual(bookmark_data['display_name'], bookmark.display_name)
self.assertEqual(bookmark_data['path'], bookmark.path)
@ddt.ddt
@skip_unless_lms
class BookmarkModelTests(BookmarksTestsBase):
    """
    Test the Bookmark model.
    """
    def setUp(self):
        super(BookmarkModelTests, self).setUp()
        # Extra vertical with no display name, for the display_name=None case.
        self.vertical_4 = ItemFactory.create(
            parent_location=self.sequential_2.location,
            category='vertical',
            display_name=None
        )
    def get_bookmark_data(self, block, user=None):
        """
        Returns bookmark data for testing.
        """
        return {
            'user': user or self.user,
            'usage_key': block.location,
            'course_key': block.location.course_key,
            'display_name': block.display_name,
        }
    # Data rows: (store type, attr name of block to bookmark,
    #             attr names of expected ancestors, expected mongo queries).
    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 'course', [], 3),
        (ModuleStoreEnum.Type.mongo, 'chapter_1', [], 3),
        (ModuleStoreEnum.Type.mongo, 'sequential_1', ['chapter_1'], 4),
        (ModuleStoreEnum.Type.mongo, 'vertical_1', ['chapter_1', 'sequential_1'], 6),
        (ModuleStoreEnum.Type.mongo, 'html_1', ['chapter_1', 'sequential_2', 'vertical_2'], 7),
        (ModuleStoreEnum.Type.split, 'course', [], 3),
        (ModuleStoreEnum.Type.split, 'chapter_1', [], 2),
        (ModuleStoreEnum.Type.split, 'sequential_1', ['chapter_1'], 2),
        (ModuleStoreEnum.Type.split, 'vertical_1', ['chapter_1', 'sequential_1'], 2),
        (ModuleStoreEnum.Type.split, 'html_1', ['chapter_1', 'sequential_2', 'vertical_2'], 2),
    )
    @ddt.unpack
    def test_path_and_queries_on_create(self, store_type, block_to_bookmark, ancestors_attrs, expected_mongo_calls):
        """
        In case of mongo, 1 query is used to fetch the block, and 2
        by path_to_location(), and then 1 query per parent in path
        is needed to fetch the parent blocks.
        """
        self.setup_data(store_type)
        user = UserFactory.create()
        expected_path = [PathItem(
            usage_key=getattr(self, ancestor_attr).location, display_name=getattr(self, ancestor_attr).display_name
        ) for ancestor_attr in ancestors_attrs]
        bookmark_data = self.get_bookmark_data(getattr(self, block_to_bookmark), user=user)
        with check_mongo_calls(expected_mongo_calls):
            bookmark, __ = Bookmark.create(bookmark_data)
        self.assertEqual(bookmark.path, expected_path)
        self.assertIsNotNone(bookmark.xblock_cache)
        self.assertEqual(bookmark.xblock_cache.paths, [])
    def test_create_bookmark_success(self):
        """
        Tests creation of bookmark.
        """
        bookmark_data = self.get_bookmark_data(self.vertical_2)
        bookmark, __ = Bookmark.create(bookmark_data)
        self.assert_bookmark_model_is_valid(bookmark, bookmark_data)
        # Creating again for the same user/usage_key must return the
        # existing bookmark unmodified, even with different display_name.
        bookmark_data_different_values = self.get_bookmark_data(self.vertical_2)
        bookmark_data_different_values['display_name'] = 'Introduction Video'
        bookmark2, __ = Bookmark.create(bookmark_data_different_values)
        # The bookmark object already created should have been returned without modifications.
        self.assertEqual(bookmark, bookmark2)
        self.assertEqual(bookmark.xblock_cache, bookmark2.xblock_cache)
        self.assert_bookmark_model_is_valid(bookmark2, bookmark_data)
        # A different user bookmarking the same block gets a new bookmark.
        bookmark_data_different_user = self.get_bookmark_data(self.vertical_2)
        bookmark_data_different_user['user'] = UserFactory.create()
        bookmark3, __ = Bookmark.create(bookmark_data_different_user)
        self.assertNotEqual(bookmark, bookmark3)
        self.assert_bookmark_model_is_valid(bookmark3, bookmark_data_different_user)
    def test_create_bookmark_successfully_with_display_name_none(self):
        """
        Tests creation of bookmark with display_name None.
        """
        bookmark_data = self.get_bookmark_data(self.vertical_4)
        bookmark, __ = Bookmark.create(bookmark_data)
        bookmark_data['display_name'] = self.vertical_4.display_name_with_default
        self.assert_bookmark_model_is_valid(bookmark, bookmark_data)
    # Data rows: (cache-update offset in seconds relative to bookmark
    #             creation, cached paths value, expected get_path calls).
    @ddt.data(
        (-30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')]], 1),
        (30, None, 2),
        (30, [], 2),
        (30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')]], 1),
        (30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')], [PathItem(EXAMPLE_USAGE_KEY_2, '2')]], 2),
    )
    @ddt.unpack
    @mock.patch('openedx.core.djangoapps.bookmarks.models.Bookmark.get_path')
    def test_path(self, seconds_delta, paths, get_path_call_count, mock_get_path):
        """
        Verify how often Bookmark.get_path() is invoked depending on the
        age and content of the cached xblock_cache.paths value.
        """
        block_path = [PathItem(UsageKey.from_string(EXAMPLE_USAGE_KEY_1), '1')]
        mock_get_path.return_value = block_path
        html = ItemFactory.create(
            parent_location=self.other_chapter_1.location, category='html', display_name='Other Lesson 1'
        )
        bookmark_data = self.get_bookmark_data(html)
        bookmark, __ = Bookmark.create(bookmark_data)
        self.assertIsNotNone(bookmark.xblock_cache)
        # Save the cache entry at a controlled time before/after creation.
        modification_datetime = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=seconds_delta)
        with freeze_time(modification_datetime):
            bookmark.xblock_cache.paths = paths
            bookmark.xblock_cache.save()
        self.assertEqual(bookmark.path, block_path)
        self.assertEqual(mock_get_path.call_count, get_path_call_count)
    # Data rows: (store type, children per block, tree depth,
    #             expected mongo queries for get_path).
    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 2, 2, 2),
        (ModuleStoreEnum.Type.mongo, 4, 2, 2),
        (ModuleStoreEnum.Type.mongo, 6, 2, 2),
        (ModuleStoreEnum.Type.mongo, 2, 3, 3),
        (ModuleStoreEnum.Type.mongo, 4, 3, 3),
        # (ModuleStoreEnum.Type.mongo, 6, 3, 3), Too slow.
        (ModuleStoreEnum.Type.mongo, 2, 4, 4),
        # (ModuleStoreEnum.Type.mongo, 4, 4, 4),
        (ModuleStoreEnum.Type.split, 2, 2, 2),
        (ModuleStoreEnum.Type.split, 4, 2, 2),
        (ModuleStoreEnum.Type.split, 2, 3, 2),
        # (ModuleStoreEnum.Type.split, 4, 3, 2),
        (ModuleStoreEnum.Type.split, 2, 4, 2),
    )
    @ddt.unpack
    def test_get_path_queries(self, store_type, children_per_block, depth, expected_mongo_calls):
        """
        In case of mongo, 2 queries are used by path_to_location(), and then
        1 query per parent in path is needed to fetch the parent blocks.
        """
        course = self.create_course_with_blocks(children_per_block, depth, store_type)
        # Find a leaf block.
        block = modulestore().get_course(course.id, depth=None)
        for __ in range(depth - 1):
            children = block.get_children()
            block = children[-1]
        with check_mongo_calls(expected_mongo_calls):
            path = Bookmark.get_path(block.location)
        self.assertEqual(len(path), depth - 2)
    def test_get_path_in_case_of_exceptions(self):
        """get_path() returns [] for missing/orphaned/unfetchable blocks."""
        user = UserFactory.create()
        # Block does not exist
        usage_key = UsageKey.from_string('i4x://edX/apis/html/interactive')
        # NOTE(review): UsageKey.replace() returns a new key; the result is
        # discarded here, so get_path() below still receives the original
        # key with its default course_key -- confirm this is intentional.
        usage_key.replace(course_key=self.course.id)
        self.assertEqual(Bookmark.get_path(usage_key), [])
        # Block is an orphan
        self.other_sequential_1.children = []
        modulestore().update_item(self.other_sequential_1, self.admin.id)
        bookmark_data = self.get_bookmark_data(self.other_vertical_2, user=user)
        bookmark, __ = Bookmark.create(bookmark_data)
        self.assertEqual(bookmark.path, [])
        self.assertIsNotNone(bookmark.xblock_cache)
        self.assertEqual(bookmark.xblock_cache.paths, [])
        # Parent block could not be retrieved
        with mock.patch('openedx.core.djangoapps.bookmarks.models.search.path_to_location') as mock_path_to_location:
            mock_path_to_location.return_value = [usage_key]
            bookmark_data = self.get_bookmark_data(self.other_sequential_1, user=user)
            bookmark, __ = Bookmark.create(bookmark_data)
            self.assertEqual(bookmark.path, [])
@ddt.ddt
class XBlockCacheModelTest(ModuleStoreTestCase):
    """
    Test the XBlockCache model.
    """
    COURSE_KEY = CourseLocator(org='test', course='test', run='test')
    CHAPTER1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='chapter', block_id='chapter1')
    SECTION1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='section', block_id='section1')
    # NOTE(review): this reuses block_id 'section1' -- 'section2' was likely
    # intended, so PATH1 and PATH2 currently differ only in display name.
    SECTION2_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='section', block_id='section1')
    VERTICAL1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='vertical', block_id='sequential1')
    # Paths in serialized form: [[usage_key_string, display_name], ...].
    PATH1 = [
        [text_type(CHAPTER1_USAGE_KEY), 'Chapter 1'],
        [text_type(SECTION1_USAGE_KEY), 'Section 1'],
    ]
    PATH2 = [
        [text_type(CHAPTER1_USAGE_KEY), 'Chapter 1'],
        [text_type(SECTION2_USAGE_KEY), 'Section 2'],
    ]
    def assert_xblock_cache_data(self, xblock_cache, data):
        """
        Assert that the XBlockCache object values match.
        """
        self.assertEqual(xblock_cache.usage_key, data['usage_key'])
        self.assertEqual(xblock_cache.course_key, data['usage_key'].course_key)
        self.assertEqual(xblock_cache.display_name, data['display_name'])
        self.assertEqual(xblock_cache._paths, data['_paths'])  # pylint: disable=protected-access
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in data['_paths']])
    # Each case: list of (create() kwargs, extra values expected afterwards);
    # the second create() in a case hits the already-existing row.
    @ddt.data(
        (
            [
                {'usage_key': VERTICAL1_USAGE_KEY, },
                {'display_name': '', '_paths': [], },
            ],
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 5', '_paths': [PATH2]},
                {'_paths': []},
            ],
        ),
        (
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 4', '_paths': [PATH1]},
                {},
            ],
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 5', '_paths': [PATH2]},
                {'_paths': [PATH1]},
            ],
        ),
    )
    def test_create(self, data):
        """
        Test XBlockCache.create() constructs and updates objects correctly.
        """
        for create_data, additional_data_to_expect in data:
            xblock_cache = XBlockCache.create(create_data)
            create_data.update(additional_data_to_expect)
            self.assert_xblock_cache_data(xblock_cache, create_data)
    @ddt.data(
        ([], [PATH1]),
        ([PATH1, PATH2], [PATH1]),
        ([PATH1], []),
    )
    @ddt.unpack
    def test_paths(self, original_paths, updated_paths):
        """The paths property round-trips through save() and reload."""
        xblock_cache = XBlockCache.create({
            'usage_key': self.VERTICAL1_USAGE_KEY,
            'display_name': 'The end.',
            '_paths': original_paths,
        })
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in original_paths])
        xblock_cache.paths = [parse_path_data(path) for path in updated_paths]
        xblock_cache.save()
        xblock_cache = XBlockCache.objects.get(id=xblock_cache.id)
        self.assertEqual(xblock_cache._paths, updated_paths)  # pylint: disable=protected-access
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in updated_paths])
| agpl-3.0 |
caosmo/pip | pip/_vendor/cachecontrol/heuristics.py | 374 | 4053 | import calendar
import time
from email.utils import formatdate, parsedate, parsedate_tz
from datetime import datetime, timedelta
# HTTP-date format used when emitting Expires headers (rendered in GMT).
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
def expire_after(delta, date=None):
    """Return *date* (defaulting to the current time) advanced by *delta*."""
    base = datetime.now() if not date else date
    return base + delta
def datetime_to_header(dt):
    """Render a datetime as an RFC 2822 date string suitable for headers."""
    epoch_seconds = calendar.timegm(dt.timetuple())
    return formatdate(epoch_seconds)
class BaseHeuristic(object):
    """Base class for heuristics that adjust cacheability of a response."""

    def warning(self, response):
        """
        Produce the value for a 1xx Warning header describing the cache
        adjustment, or None to omit the header.

        The response is passed in to allow warnings such as 113
        (http://tools.ietf.org/html/rfc7234#section-5.5.4), which must
        state explicitly that the response is over 24 hours old.
        """
        return '110 - "Response is Stale"'

    def update_headers(self, response):
        """
        Compute any new headers for the response.

        NOTE: Implementations SHOULD always include some Warning header
        to signify that the response was cached by the client, not by
        way of the provided headers.
        """
        return {}

    def apply(self, response):
        """Apply this heuristic's header updates to *response* in place."""
        # Compute the warning before headers are touched, matching the
        # documented contract that warning() sees the original response.
        warning_value = self.warning(response)
        response.headers.update(self.update_headers(response))
        if warning_value is not None:
            response.headers.update({'Warning': warning_value})
        return response
class OneDayCache(BaseHeuristic):
    """
    Heuristic that makes a response cacheable for one day past its Date
    header when no Expires header is already present.
    """

    def update_headers(self, response):
        """Add Expires (Date + 1 day) and Cache-Control if Expires is absent."""
        if 'expires' in response.headers:
            return {}
        date_tuple = parsedate(response.headers['date'])
        expiry = expire_after(timedelta(days=1),
                              date=datetime(*date_tuple[:6]))
        return {
            'expires': datetime_to_header(expiry),
            'cache-control': 'public',
        }
class ExpiresAfter(BaseHeuristic):
    """
    Heuristic that caches **all** responses for one fixed time period.
    """

    def __init__(self, **kw):
        # Keyword args are timedelta components (days=..., hours=..., etc.).
        self.delta = timedelta(**kw)

    def update_headers(self, response):
        """Stamp an absolute Expires plus a public Cache-Control."""
        expiry = expire_after(self.delta)
        return {
            'expires': datetime_to_header(expiry),
            'cache-control': 'public',
        }

    def warning(self, response):
        """Warn that the response was cached automatically."""
        return ('110 - Automatically cached for %s. Response might be stale'
                % self.delta)
class LastModified(BaseHeuristic):
    """
    If there is no Expires header already, fall back on Last-Modified
    using the heuristic from
    http://tools.ietf.org/html/rfc7234#section-4.2.2
    to calculate a reasonable value.

    Firefox also does something like this per
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike mozilla we limit this to 24-hr.
    """
    cacheable_by_default_statuses = set([
        200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
    ])

    def update_headers(self, resp):
        """Derive an Expires header from Last-Modified when permitted."""
        headers = resp.headers
        # The response must carry no explicit freshness info, be cacheable
        # by default for its status, and have both timestamps available.
        not_applicable = (
            'expires' in headers
            or ('cache-control' in headers
                and headers['cache-control'] != 'public')
            or resp.status not in self.cacheable_by_default_statuses
            or 'date' not in headers
            or 'last-modified' not in headers
        )
        if not_applicable:
            return {}
        served_at = calendar.timegm(parsedate_tz(headers['date']))
        modified = parsedate(headers['last-modified'])
        if served_at is None or modified is None:
            return {}
        current_age = max(0, time.time() - served_at)
        # Freshness is 10% of the Date/Last-Modified gap, capped at 24h.
        freshness_lifetime = max(
            0, min((served_at - calendar.timegm(modified)) / 10, 24 * 3600))
        if freshness_lifetime <= current_age:
            return {}
        expiry = served_at + freshness_lifetime
        return {'expires': time.strftime(TIME_FMT, time.gmtime(expiry))}

    def warning(self, resp):
        """This heuristic never emits a Warning header."""
        return None
| mit |
persandstrom/home-assistant | homeassistant/components/climate/sensibo.py | 1 | 12148 | """
Support for Sensibo wifi-enabled home thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.sensibo/
"""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, ATTR_TEMPERATURE, CONF_API_KEY, CONF_ID,
STATE_ON, STATE_OFF, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from homeassistant.components.climate import (
ATTR_CURRENT_HUMIDITY, ClimateDevice, DOMAIN, PLATFORM_SCHEMA,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_OPERATION_MODE,
SUPPORT_FAN_MODE, SUPPORT_SWING_MODE,
SUPPORT_ON_OFF)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
REQUIREMENTS = ['pysensibo==1.0.3']
_LOGGER = logging.getLogger(__name__)
# Sentinel config value meaning "expose every device on the account".
ALL = ['all']
# Seconds allowed for each Sensibo API round trip.
TIMEOUT = 10
SERVICE_ASSUME_STATE = 'sensibo_assume_state'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
})
# Payload schema for the sensibo_assume_state service.
ASSUME_STATE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_STATE): cv.string,
})
# Fields requested from the Sensibo API on refresh; the initial discovery
# fetch additionally needs the device id.
_FETCH_FIELDS = ','.join([
    'room{name}', 'measurements', 'remoteCapabilities',
    'acState', 'connectionStatus{isAlive}', 'temperatureUnit'])
_INITIAL_FETCH_FIELDS = 'id,' + _FETCH_FIELDS
# Maps keys present in a device's acState to supported-feature flags.
FIELD_TO_FLAG = {
    'fanLevel': SUPPORT_FAN_MODE,
    'mode': SUPPORT_OPERATION_MODE,
    'swing': SUPPORT_SWING_MODE,
    'targetTemperature': SUPPORT_TARGET_TEMPERATURE,
    'on': SUPPORT_ON_OFF,
}
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Set up Sensibo devices."""
    import pysensibo
    client = pysensibo.SensiboClient(
        config[CONF_API_KEY], session=async_get_clientsession(hass),
        timeout=TIMEOUT)
    devices = []
    try:
        # Discover devices on the account, keeping only configured IDs
        # (or everything when CONF_ID is the default ALL sentinel).
        for dev in (
                yield from client.async_get_devices(_INITIAL_FETCH_FIELDS)):
            if config[CONF_ID] == ALL or dev['id'] in config[CONF_ID]:
                devices.append(SensiboClimate(
                    client, dev, hass.config.units.temperature_unit))
    except (aiohttp.client_exceptions.ClientConnectorError,
            asyncio.TimeoutError):
        # PlatformNotReady tells Home Assistant to retry setup later.
        _LOGGER.exception('Failed to connect to Sensibo servers.')
        raise PlatformNotReady
    if devices:
        async_add_entities(devices)
    @asyncio.coroutine
    def async_assume_state(service):
        """Set state according to external service call."""
        # Closure over `devices`; an empty/missing entity_id targets all.
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            target_climate = [device for device in devices
                              if device.entity_id in entity_ids]
        else:
            target_climate = devices
        update_tasks = []
        for climate in target_climate:
            yield from climate.async_assume_state(
                service.data.get(ATTR_STATE))
            update_tasks.append(climate.async_update_ha_state(True))
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    hass.services.async_register(
        DOMAIN, SERVICE_ASSUME_STATE, async_assume_state,
        schema=ASSUME_STATE_SCHEMA)
class SensiboClimate(ClimateDevice):
    """Representation of a Sensibo device."""
    def __init__(self, client, data, units):
        """Build SensiboClimate.
        client: aiohttp session.
        data: initially-fetched data.
        """
        self._client = client
        self._id = data['id']
        self._external_state = None
        self._units = units
        # Populates the remaining _-prefixed attributes from `data`.
        self._do_update(data)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._supported_features
    def _do_update(self, data):
        """Refresh cached name, measurements, AC state and capabilities."""
        self._name = data['room']['name']
        self._measurements = data['measurements']
        self._ac_states = data['acState']
        self._status = data['connectionStatus']['isAlive']
        capabilities = data['remoteCapabilities']
        self._operations = sorted(capabilities['modes'].keys())
        self._current_capabilities = capabilities[
            'modes'][self.current_operation]
        temperature_unit_key = data.get('temperatureUnit') or \
            self._ac_states.get('temperatureUnit')
        if temperature_unit_key:
            self._temperature_unit = TEMP_CELSIUS if \
                temperature_unit_key == 'C' else TEMP_FAHRENHEIT
            self._temperatures_list = self._current_capabilities[
                'temperatures'].get(temperature_unit_key, {}).get('values', [])
        else:
            # No unit reported: fall back to the hass-configured unit and
            # treat the supported temperature list as unknown.
            self._temperature_unit = self._units
            self._temperatures_list = []
        # Advertise only the features present in the reported acState.
        self._supported_features = 0
        for key in self._ac_states:
            if key in FIELD_TO_FLAG:
                self._supported_features |= FIELD_TO_FLAG[key]
    @property
    def state(self):
        """Return the current state."""
        # An externally assumed state (see async_assume_state) wins.
        return self._external_state or super().state
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_CURRENT_HUMIDITY: self.current_humidity,
                'battery': self.current_battery}
    @property
    def temperature_unit(self):
        """Return the unit of measurement which this thermostat uses."""
        return self._temperature_unit
    @property
    def available(self):
        """Return True if entity is available."""
        return self._status
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._ac_states.get('targetTemperature')
    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        if self.temperature_unit == self.hass.config.units.temperature_unit:
            # We are working in same units as the a/c unit. Use whole degrees
            # like the API supports.
            return 1
        # Unit conversion is going on. No point to stick to specific steps.
        return None
    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._ac_states['mode']
    @property
    def current_humidity(self):
        """Return the current humidity."""
        return self._measurements['humidity']
    @property
    def current_battery(self):
        """Return the current battery voltage."""
        return self._measurements.get('batteryVoltage')
    @property
    def current_temperature(self):
        """Return the current temperature."""
        # This field is not affected by temperatureUnit.
        # It is always in C
        return convert_temperature(
            self._measurements['temperature'],
            TEMP_CELSIUS,
            self.temperature_unit)
    @property
    def operation_list(self):
        """List of available operation modes."""
        return self._operations
    @property
    def current_fan_mode(self):
        """Return the fan setting."""
        return self._ac_states.get('fanLevel')
    @property
    def fan_list(self):
        """List of available fan modes."""
        return self._current_capabilities.get('fanLevels')
    @property
    def current_swing_mode(self):
        """Return the swing setting."""
        return self._ac_states.get('swing')
    @property
    def swing_list(self):
        """List of available swing modes."""
        return self._current_capabilities.get('swing')
    @property
    def name(self):
        """Return the name of the entity."""
        return self._name
    @property
    def is_on(self):
        """Return true if AC is on."""
        return self._ac_states['on']
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._temperatures_list[0] \
            if self._temperatures_list else super().min_temp
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._temperatures_list[-1] \
            if self._temperatures_list else super().max_temp
    @property
    def unique_id(self):
        """Return unique ID based on Sensibo ID."""
        return self._id
    @asyncio.coroutine
    def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        temperature = int(temperature)
        if temperature not in self._temperatures_list:
            # Requested temperature is not supported.
            if temperature == self.target_temperature:
                return
            # Snap to the neighbouring supported value in the requested
            # direction; give up at either end of the supported list.
            index = self._temperatures_list.index(self.target_temperature)
            if temperature > self.target_temperature and index < len(
                    self._temperatures_list) - 1:
                temperature = self._temperatures_list[index + 1]
            elif temperature < self.target_temperature and index > 0:
                temperature = self._temperatures_list[index - 1]
            else:
                return
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'targetTemperature', temperature, self._ac_states)
    @asyncio.coroutine
    def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'fanLevel', fan_mode, self._ac_states)
    @asyncio.coroutine
    def async_set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'mode', operation_mode, self._ac_states)
    @asyncio.coroutine
    def async_set_swing_mode(self, swing_mode):
        """Set new target swing operation."""
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'swing', swing_mode, self._ac_states)
    @asyncio.coroutine
    def async_turn_on(self):
        """Turn Sensibo unit on."""
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'on', True, self._ac_states)
    @asyncio.coroutine
    def async_turn_off(self):
        """Turn Sensibo unit off."""
        with async_timeout.timeout(TIMEOUT):
            yield from self._client.async_set_ac_state_property(
                self._id, 'on', False, self._ac_states)
    @asyncio.coroutine
    def async_assume_state(self, state):
        """Set external state."""
        change_needed = (state != STATE_OFF and not self.is_on) \
            or (state == STATE_OFF and self.is_on)
        if change_needed:
            with async_timeout.timeout(TIMEOUT):
                yield from self._client.async_set_ac_state_property(
                    self._id,
                    'on',
                    state != STATE_OFF,  # value
                    self._ac_states,
                    True  # assumed_state
                )
        # Plain on/off maps onto the real device state; any other value is
        # kept as an externally imposed override (see the state property).
        if state in [STATE_ON, STATE_OFF]:
            self._external_state = None
        else:
            self._external_state = state
    @asyncio.coroutine
    def async_update(self):
        """Retrieve latest state."""
        try:
            with async_timeout.timeout(TIMEOUT):
                data = yield from self._client.async_get_device(
                    self._id, _FETCH_FIELDS)
            self._do_update(data)
        except aiohttp.client_exceptions.ClientError:
            _LOGGER.warning('Failed to connect to Sensibo servers.')
| apache-2.0 |
spcui/avocado-vt | virttest/libvirt_vm.py | 5 | 98194 | """
Utility classes and functions to handle Virtual Machine creation using libvirt.
:copyright: 2011 Red Hat Inc.
"""
import time
import string
import os
import logging
import fcntl
import re
import shutil
import tempfile
import platform
from avocado.utils import process
from avocado.utils import crypto
from avocado.core import exceptions
import aexpect
from . import error_context
from . import utils_misc
from . import virt_vm
from . import storage
from . import remote
from . import virsh
from . import libvirt_xml
from . import data_dir
from . import xml_utils
from . import utils_selinux
def normalize_connect_uri(connect_uri):
    """
    Turn a Cartesian-config connect_uri value into a canonical URI
    that virsh accepts.

    :param connect_uri: Cartesian Params setting
    :return: Normalized connect_uri
    :raise ValueError: if the URI could not be canonicalized
    """
    kwargs = {} if connect_uri == "default" else {"uri": connect_uri}
    result = virsh.canonical_uri(**kwargs)
    if result:
        return result
    raise ValueError("Normalizing connect_uri '%s' failed" % connect_uri)
def complete_uri(ip_address):
    """
    Build a remote URI combining *ip_address* with the local hypervisor URI.
    It is useful when you need to connect a remote hypervisor.

    :param ip_address: an ip address or a hostname
    :return: a complete uri
    """
    # Let CmdError propagate if canonical_uri fails.
    local_uri = virsh.canonical_uri(ignore_status=False)
    driver = local_uri.partition(":")[0]
    # The libvirtd daemon's mode (system or session on qemu).
    daemon_mode = local_uri.rsplit("/", 1)[-1]
    return "%s+ssh://%s/%s" % (driver, ip_address, daemon_mode)
def get_uri_with_transport(uri_type='qemu', transport="", dest_ip=""):
    """
    Return a URI to connect driver on dest with a specified transport.

    :param uri_type: hypervisor/driver type keyword (e.g. 'qemu', 'lxc')
    :param transport: The transport type connect to dest.
    :param dest_ip: The ip of destination.
    :raise ValueError: if uri_type is not a supported keyword
    """
    known_uris = {'qemu': "qemu:///system",
                  'qemu_system': "qemu:///system",
                  'qemu_session': "qemu:///session",
                  'lxc': "lxc:///",
                  'xen': "xen:///",
                  'esx': "esx:///"}
    if uri_type not in known_uris:
        raise ValueError("Param uri_type = %s is not supported." % (uri_type))
    # Split into driver and destination parts, e.g.:
    #   "qemu:///system" -> ("qemu", "system"); "lxc:///" -> ("lxc", "")
    driver_part, dest_part = known_uris[uri_type].split(":///")
    if transport:
        driver_part = "%s+%s" % (driver_part, transport)
    dest_part = "://%s/%s" % (dest_ip, dest_part)
    return "%s%s" % (driver_part, dest_part)
class VM(virt_vm.BaseVM):
"""
This class handles all basic VM operations for libvirt.
"""
    def __init__(self, name, params, root_dir, address_cache, state=None):
        """
        Initialize the object and set a few attributes.

        :param name: The name of the object
        :param params: A dict containing VM params
                (see method make_create_command for a full description)
        :param root_dir: Base directory for relative filenames
        :param address_cache: A dict that maps MAC addresses to IP addresses
        :param state: If provided, use this as self.__dict__
        """
        # When a saved state dict is supplied, rehydrate the whole object
        # from it instead of initializing fresh attributes.
        if state:
            self.__dict__ = state
        else:
            self.process = None
            self.serial_ports = []
            self.serial_console_log = None
            self.serial_console = None
            self.redirs = {}
            self.vnc_port = None
            self.vnc_autoport = True
            self.pci_assignable = None
            self.netdev_id = []
            self.device_id = []
            self.pci_devices = []
            self.uuid = None
            self.remote_sessions = []
        self.spice_port = 8000
        self.name = name
        self.params = params
        self.root_dir = root_dir
        self.address_cache = address_cache
        self.vnclisten = "0.0.0.0"
        # Canonicalize the configured URI and derive the driver (qemu/lxc/...)
        # before the superclass initializes networking.
        self.connect_uri = normalize_connect_uri(params.get("connect_uri",
                                                            "default"))
        self.driver_type = virsh.driver(uri=self.connect_uri)
        self.params['driver_type_' + self.name] = self.driver_type
        # virtnet init depends on vm_type/driver_type being set w/in params
        super(VM, self).__init__(name, params)
        logging.info("Libvirt VM '%s', driver '%s', uri '%s'",
                     self.name, self.driver_type, self.connect_uri)
def is_lxc(self):
"""
Return True if VM is linux container.
"""
return (self.connect_uri and self.connect_uri.count("lxc"))
def is_qemu(self):
"""
Return True if VM is a qemu guest.
"""
return (self.connect_uri and self.connect_uri.count("qemu"))
def is_xen(self):
"""
Return True if VM is a xen guest.
"""
return (self.connect_uri and self.connect_uri.count("xen"))
def is_esx(self):
"""
Return True if VM is a esx guest.
"""
return (self.connect_uri and self.connect_uri.count("esx"))
def verify_alive(self):
"""
Make sure the VM is alive.
:raise VMDeadError: If the VM is dead
"""
if not self.is_alive():
raise virt_vm.VMDeadError("Domain %s is inactive" % self.name,
self.state())
def is_alive(self):
"""
Return True if VM is alive.
"""
return virsh.is_alive(self.name, uri=self.connect_uri)
def is_dead(self):
"""
Return True if VM is dead.
"""
return virsh.is_dead(self.name, uri=self.connect_uri)
def is_paused(self):
"""
Return True if VM is paused.
"""
return (self.state() == "paused")
def is_persistent(self):
"""
Return True if VM is persistent.
"""
try:
dominfo = (virsh.dominfo(self.name,
uri=self.connect_uri).stdout.strip())
return bool(re.search(r"^Persistent:\s+[Yy]es", dominfo,
re.MULTILINE))
except process.CmdError:
return False
def is_autostart(self):
"""
Return True if VM is autostart.
"""
try:
dominfo = (virsh.dominfo(self.name,
uri=self.connect_uri).stdout.strip())
return bool(re.search(r"^Autostart:\s+enable", dominfo,
re.MULTILINE))
except process.CmdError:
return False
def exists(self):
"""
Return True if VM exists.
"""
return virsh.domain_exists(self.name, uri=self.connect_uri)
def undefine(self):
"""
Undefine the VM.
"""
try:
virsh.undefine(self.name, uri=self.connect_uri,
ignore_status=False)
except process.CmdError, detail:
logging.error("Undefined VM %s failed:\n%s", self.name, detail)
return False
return True
def define(self, xml_file):
"""
Define the VM.
"""
if not os.path.exists(xml_file):
logging.error("File %s not found." % xml_file)
return False
try:
virsh.define(xml_file, uri=self.connect_uri,
ignore_status=False)
except process.CmdError, detail:
logging.error("Defined VM from %s failed:\n%s", xml_file, detail)
return False
return True
def state(self):
"""
Return domain state.
"""
return virsh.domstate(self.name, uri=self.connect_uri).stdout.strip()
def get_id(self):
"""
Return VM's ID.
"""
return virsh.domid(self.name, uri=self.connect_uri).stdout.strip()
def get_xml(self):
"""
Return VM's xml file.
"""
return virsh.dumpxml(self.name, uri=self.connect_uri).stdout.strip()
def backup_xml(self, active=False):
"""
Backup the guest's xmlfile.
"""
# Since backup_xml() is not a function for testing,
# we have to handle the exception here.
try:
xml_file = tempfile.mktemp(dir="/tmp")
if active:
extra = ""
else:
extra = "--inactive"
virsh.dumpxml(self.name, extra=extra,
to_file=xml_file, uri=self.connect_uri)
return xml_file
except Exception, detail:
if os.path.exists(xml_file):
os.remove(xml_file)
logging.error("Failed to backup xml file:\n%s", detail)
return ""
def clone(self, name=None, params=None, root_dir=None, address_cache=None,
copy_state=False):
"""
Return a clone of the VM object with optionally modified parameters.
The clone is initially not alive and needs to be started using create().
Any parameters not passed to this function are copied from the source
VM.
:param name: Optional new VM name
:param params: Optional new VM creation parameters
:param root_dir: Optional new base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param copy_state: If True, copy the original VM's state to the clone.
Mainly useful for make_create_command().
"""
if name is None:
name = self.name
if params is None:
params = self.params.copy()
if root_dir is None:
root_dir = self.root_dir
if address_cache is None:
address_cache = self.address_cache
if copy_state:
state = self.__dict__.copy()
else:
state = None
return VM(name, params, root_dir, address_cache, state)
def make_create_command(self, name=None, params=None, root_dir=None):
"""
Generate a libvirt command line. All parameters are optional. If a
parameter is not supplied, the corresponding value stored in the
class attributes is used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:note: The params dict should contain:
mem -- memory size in MBs
cdrom -- ISO filename to use with the qemu -cdrom parameter
extra_params -- a string to append to the qemu command
shell_port -- port of the remote shell daemon on the guest
(SSH, Telnet or the home-made Remote Shell Server)
shell_client -- client program to use for connecting to the
remote shell daemon on the guest (ssh, telnet or nc)
x11_display -- if specified, the DISPLAY environment variable
will be be set to this value for the qemu process (useful for
SDL rendering)
images -- a list of image object names, separated by spaces
nics -- a list of NIC object names, separated by spaces
For each image in images:
drive_format -- string to pass as 'if' parameter for this
image (e.g. ide, scsi)
image_snapshot -- if yes, pass 'snapshot=on' to qemu for
this image
image_boot -- if yes, pass 'boot=on' to qemu for this image
In addition, all parameters required by get_image_filename.
For each NIC in nics:
nic_model -- string to pass as 'model' parameter for this
NIC (e.g. e1000)
"""
# helper function for command line option wrappers
def has_option(help_text, option):
return bool(re.search(r"--%s" % option, help_text, re.MULTILINE))
# Wrappers for all supported libvirt command line parameters.
# This is meant to allow support for multiple libvirt versions.
# Each of these functions receives the output of 'libvirt --help' as a
# parameter, and should add the requested command line option
# accordingly.
def add_name(help_text, name):
return " --name '%s'" % name
def add_machine_type(help_text, machine_type):
if has_option(help_text, "machine"):
return " --machine %s" % machine_type
else:
return ""
def add_hvm_or_pv(help_text, hvm_or_pv):
if hvm_or_pv == "hvm":
return " --hvm --accelerate"
elif hvm_or_pv == "pv":
return " --paravirt"
else:
logging.warning("Unknown virt type hvm_or_pv, using default.")
return ""
def add_mem(help_text, mem):
return " --ram=%s" % mem
def add_check_cpu(help_text):
if has_option(help_text, "check-cpu"):
return " --check-cpu"
else:
return ""
def add_smp(help_text, smp):
return " --vcpu=%s" % smp
def add_location(help_text, location):
if has_option(help_text, "location"):
return " --location %s" % location
else:
return ""
def add_cdrom(help_text, filename, index=None):
if has_option(help_text, "cdrom"):
return " --cdrom %s" % filename
else:
return ""
def add_pxe(help_text):
if has_option(help_text, "pxe"):
return " --pxe"
else:
return ""
def add_import(help_text):
if has_option(help_text, "import"):
return " --import"
else:
return ""
def add_controller(model=None):
"""
Add controller option for virt-install command line.
:param model: string, controller model.
:return: string, empty or controller option.
"""
if model == 'virtio-scsi':
return " --controller type=scsi,model=virtio-scsi"
else:
return ""
def check_controller(virt_install_cmd_line, controller):
"""
Check for the controller already available in virt-install
command line.
:param virt_install_cmd_line: string, virt-install command line.
:param controller: string, controller model.
:return: True if succeed of False if failed.
"""
found = False
output = re.findall(
r"controller\stype=(\S+),model=(\S+)", virt_install_cmd_line)
for item in output:
if controller in item[1]:
found = True
break
return found
def add_drive(help_text, filename, pool=None, vol=None, device=None,
bus=None, perms=None, size=None, sparse=False,
cache=None, fmt=None):
cmd = " --disk"
if filename:
cmd += " path=%s" % filename
elif pool:
if vol:
cmd += " vol=%s/%s" % (pool, vol)
else:
cmd += " pool=%s" % pool
if device:
cmd += ",device=%s" % device
if bus:
cmd += ",bus=%s" % bus
if perms:
cmd += ",%s" % perms
if size:
cmd += ",size=%s" % size.rstrip("Gg")
if sparse:
cmd += ",sparse=false"
if fmt:
cmd += ",format=%s" % fmt
if cache:
cmd += ",cache=%s" % cache
return cmd
def add_floppy(help_text, filename):
return " --disk path=%s,device=floppy,ro" % filename
def add_vnc(help_text, vnc_port=None):
if vnc_port:
return " --vnc --vncport=%d" % (vnc_port)
else:
return " --vnc"
def add_vnclisten(help_text, vnclisten):
if has_option(help_text, "vnclisten"):
return " --vnclisten=%s" % (vnclisten)
else:
return ""
def add_sdl(help_text):
if has_option(help_text, "sdl"):
return " --sdl"
else:
return ""
def add_nographic(help_text):
return " --nographics"
def add_video(help_text, video_device):
if has_option(help_text, "video"):
return " --video=%s" % (video_device)
else:
return ""
def add_uuid(help_text, uuid):
if has_option(help_text, "uuid"):
return " --uuid %s" % uuid
else:
return ""
def add_os_type(help_text, os_type):
if has_option(help_text, "os-type"):
return " --os-type %s" % os_type
else:
return ""
def add_os_variant(help_text, os_variant):
if has_option(help_text, "os-variant"):
return " --os-variant %s" % os_variant
else:
return ""
def add_pcidevice(help_text, pci_device):
if has_option(help_text, "host-device"):
return " --host-device %s" % pci_device
else:
return ""
def add_soundhw(help_text, sound_device):
if has_option(help_text, "soundhw"):
return " --soundhw %s" % sound_device
else:
return ""
def add_serial(help_text):
if has_option(help_text, "serial"):
return " --serial pty"
else:
return ""
def add_kernel_cmdline(help_text, cmdline):
return " -append %s" % cmdline
def add_connect_uri(help_text, uri):
if uri and has_option(help_text, "connect"):
return " --connect=%s" % uri
else:
return ""
def add_security(help_text, sec_type, sec_label=None, sec_relabel=None):
"""
Return security options for install command.
"""
if has_option(help_text, "security"):
result = " --security"
if sec_type == 'static':
if sec_label is None:
raise ValueError("Seclabel is not setted for static.")
result += " type=static,label=%s" % (sec_label)
elif sec_type == 'dynamic':
result += " type=dynamic"
else:
raise ValueError("Security type %s is not supported."
% sec_type)
if sec_relabel is not None:
result += ",relabel=%s" % sec_relabel
else:
result = ""
return result
def add_nic(help_text, nic_params):
"""
Return additional command line params based on dict-like nic_params
"""
mac = nic_params.get('mac')
nettype = nic_params.get('nettype')
netdst = nic_params.get('netdst')
nic_model = nic_params.get('nic_model')
if nettype:
result = " --network=%s" % nettype
else:
result = ""
if has_option(help_text, "bridge"):
# older libvirt (--network=NATdev --bridge=bridgename
# --mac=mac)
if nettype != 'user':
result += ':%s' % netdst
if mac: # possible to specify --mac w/o --network
result += " --mac=%s" % mac
else:
# newer libvirt (--network=mynet,model=virtio,mac=00:11)
if nettype != 'user':
result += '=%s' % netdst
if nettype and nic_model: # only supported along with nettype
result += ",model=%s" % nic_model
if nettype and mac:
result += ',mac=%s' % mac
elif mac: # possible to specify --mac w/o --network
result += " --mac=%s" % mac
logging.debug("vm.make_create_command.add_nic returning: %s",
result)
return result
def add_memballoon(help_text, memballoon_model):
"""
Adding memballoon device to the vm.
:param help_text: string, virt-install help text.
:param memballon_model: string, memballoon model.
:return: string, empty or memballoon model option.
"""
if has_option(help_text, "memballoon"):
result = " --memballoon model=%s" % memballoon_model
else:
logging.warning("memballoon is not supported")
result = ""
logging.debug("vm.add_memballoon returning: %s", result)
return result
# End of command line option wrappers
if name is None:
name = self.name
if params is None:
params = self.params
if root_dir is None:
root_dir = self.root_dir
# Clone this VM using the new params
vm = self.clone(name, params, root_dir, copy_state=True)
virt_install_binary = utils_misc.get_path(
root_dir,
params.get("virt_install_binary",
"virt-install"))
help_text = process.system_output("%s --help" % virt_install_binary,
verbose=False)
# Find all supported machine types, so we can rule out an unsupported
# machine type option passed in the configuration.
hvm_or_pv = params.get("hvm_or_pv", "hvm")
# default to 'uname -m' output
arch_name = params.get("vm_arch_name", platform.machine())
capabs = libvirt_xml.CapabilityXML()
try:
support_machine_type = capabs.guest_capabilities[
hvm_or_pv][arch_name]['machine']
except KeyError, detail:
if detail.args[0] == hvm_or_pv:
raise KeyError("No libvirt support for %s virtualization, "
"does system hardware + software support it?"
% hvm_or_pv)
elif detail.args[0] == arch_name:
raise KeyError("No libvirt support for %s virtualization of "
"%s, does system hardware + software support "
"it?" % (hvm_or_pv, arch_name))
raise
logging.debug("Machine types supported for %s/%s: %s",
hvm_or_pv, arch_name, support_machine_type)
# Start constructing the qemu command
virt_install_cmd = ""
# Set the X11 display parameter if requested
if params.get("x11_display"):
virt_install_cmd += "DISPLAY=%s " % params.get("x11_display")
# Add the qemu binary
virt_install_cmd += virt_install_binary
# set connect uri
virt_install_cmd += add_connect_uri(help_text, self.connect_uri)
# hvm or pv specified by libvirt switch (pv used by Xen only)
if hvm_or_pv:
virt_install_cmd += add_hvm_or_pv(help_text, hvm_or_pv)
# Add the VM's name
virt_install_cmd += add_name(help_text, name)
machine_type = params.get("machine_type")
if machine_type:
if machine_type in support_machine_type:
virt_install_cmd += add_machine_type(help_text, machine_type)
else:
raise exceptions.TestNAError("Unsupported machine type %s." %
(machine_type))
mem = params.get("mem")
if mem:
virt_install_cmd += add_mem(help_text, mem)
# TODO: should we do the check before we call ? negative case ?
check_cpu = params.get("use_check_cpu")
if check_cpu:
virt_install_cmd += add_check_cpu(help_text)
smp = params.get("smp")
if smp:
virt_install_cmd += add_smp(help_text, smp)
# TODO: directory location for vmlinuz/kernel for cdrom install ?
location = None
if params.get("medium") == 'url':
location = params.get('url')
elif params.get("medium") == 'kernel_initrd':
# directory location of kernel/initrd pair (directory layout must
# be in format libvirt will recognize)
location = params.get("image_dir")
elif params.get("medium") == 'nfs':
location = "nfs:%s:%s" % (params.get("nfs_server"),
params.get("nfs_dir"))
elif params.get("medium") == 'cdrom':
if params.get("use_libvirt_cdrom_switch") == 'yes':
virt_install_cmd += add_cdrom(
help_text, params.get("cdrom_cd1"))
elif params.get("unattended_delivery_method") == "integrated":
cdrom_path = os.path.join(data_dir.get_data_dir(),
params.get("cdrom_unattended"))
virt_install_cmd += add_cdrom(help_text, cdrom_path)
else:
location = data_dir.get_data_dir()
kernel_dir = os.path.dirname(params.get("kernel"))
kernel_parent_dir = os.path.dirname(kernel_dir)
pxeboot_link = os.path.join(kernel_parent_dir, "pxeboot")
if os.path.islink(pxeboot_link):
os.unlink(pxeboot_link)
if os.path.isdir(pxeboot_link):
logging.info("Removed old %s leftover directory",
pxeboot_link)
shutil.rmtree(pxeboot_link)
os.symlink(kernel_dir, pxeboot_link)
elif params.get("medium") == "import":
virt_install_cmd += add_import(help_text)
if location:
virt_install_cmd += add_location(help_text, location)
if params.get("display") == "vnc":
if params.get("vnc_autoport") == "yes":
vm.vnc_autoport = True
else:
vm.vnc_autoport = False
if not vm.vnc_autoport and params.get("vnc_port"):
vm.vnc_port = int(params.get("vnc_port"))
virt_install_cmd += add_vnc(help_text, vm.vnc_port)
if params.get("vnclisten"):
vm.vnclisten = params.get("vnclisten")
virt_install_cmd += add_vnclisten(help_text, vm.vnclisten)
elif params.get("display") == "sdl":
virt_install_cmd += add_sdl(help_text)
elif params.get("display") == "nographic":
virt_install_cmd += add_nographic(help_text)
video_device = params.get("video_device")
if video_device:
virt_install_cmd += add_video(help_text, video_device)
sound_device = params.get("sound_device")
if sound_device:
virt_install_cmd += add_soundhw(help_text, sound_device)
# if none is given a random UUID will be generated by libvirt
if params.get("uuid"):
virt_install_cmd += add_uuid(help_text, params.get("uuid"))
# selectable OS type
if params.get("use_os_type") == "yes":
virt_install_cmd += add_os_type(help_text, params.get("os_type"))
# selectable OS variant
if params.get("use_os_variant") == "yes":
virt_install_cmd += add_os_variant(
help_text, params.get("os_variant"))
# Add serial console
virt_install_cmd += add_serial(help_text)
# Add memballoon device
memballoon_model = params.get("memballoon_model")
if memballoon_model:
virt_install_cmd += add_memballoon(help_text, memballoon_model)
# If the PCI assignment step went OK, add each one of the PCI assigned
# devices to the command line.
if self.pci_devices:
for pci_id in self.pci_devices:
virt_install_cmd += add_pcidevice(help_text, pci_id)
for image_name in params.objects("images"):
image_params = params.object_params(image_name)
base_dir = image_params.get("images_base_dir",
data_dir.get_data_dir())
filename = storage.get_image_filename(image_params,
base_dir)
if image_params.get("use_storage_pool") == "yes":
filename = None
virt_install_cmd += add_drive(help_text,
filename,
image_params.get("image_pool"),
image_params.get("image_vol"),
image_params.get("image_device"),
image_params.get("image_bus"),
image_params.get("image_perms"),
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
if image_params.get("boot_drive") == "no":
continue
if filename:
libvirt_controller = image_params.get(
"libvirt_controller", None)
_drive_format = image_params.get("drive_format")
if libvirt_controller:
if not check_controller(virt_install_cmd, libvirt_controller):
virt_install_cmd += add_controller(libvirt_controller)
# this will reset the scsi-hd to scsi as we are adding controller
# to mention the drive format
if 'scsi' in _drive_format:
_drive_format = "scsi"
virt_install_cmd += add_drive(help_text,
filename,
None,
None,
None,
_drive_format,
None,
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
unattended_integrated = (params.get('unattended_delivery_method') !=
'integrated')
xen_pv = self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv'
if unattended_integrated and not xen_pv:
for cdrom in params.objects("cdroms"):
cdrom_params = params.object_params(cdrom)
iso = cdrom_params.get("cdrom")
if params.get("use_libvirt_cdrom_switch") == 'yes':
# we don't want to skip the winutils iso
if not cdrom == 'winutils':
logging.debug(
"Using --cdrom instead of --disk for install")
logging.debug("Skipping CDROM:%s:%s", cdrom, iso)
continue
if params.get("medium") == 'cdrom_no_kernel_initrd':
if iso == params.get("cdrom_cd1"):
logging.debug("Using cdrom or url for install")
logging.debug("Skipping CDROM: %s", iso)
continue
if iso:
iso_path = utils_misc.get_path(root_dir, iso)
iso_image_pool = image_params.get("iso_image_pool")
iso_image_vol = image_params.get("iso_image_vol")
virt_install_cmd += add_drive(help_text,
iso_path,
iso_image_pool,
virt_install_cmd,
'cdrom',
None,
None,
None,
None,
None,
None)
# We may want to add {floppy_otps} parameter for -fda
# {fat:floppy:}/path/. However vvfat is not usually recommended.
# Only support to add the main floppy if you want to add the second
# one please modify this part.
floppy = params.get("floppy_name")
if floppy:
floppy = utils_misc.get_path(data_dir.get_data_dir(), floppy)
virt_install_cmd += add_drive(help_text, floppy,
None,
None,
'floppy',
None,
None,
None,
None,
None,
None)
# setup networking parameters
for nic in vm.virtnet:
# make_create_command can be called w/o vm.create()
nic = vm.add_nic(**dict(nic))
logging.debug("make_create_command() setting up command for"
" nic: %s" % str(nic))
virt_install_cmd += add_nic(help_text, nic)
if params.get("use_no_reboot") == "yes":
virt_install_cmd += " --noreboot"
if params.get("use_autostart") == "yes":
virt_install_cmd += " --autostart"
if params.get("virt_install_debug") == "yes":
virt_install_cmd += " --debug"
# bz still open, not fully functional yet
if params.get("use_virt_install_wait") == "yes":
virt_install_cmd += (" --wait %s" %
params.get("virt_install_wait_time"))
kernel_params = params.get("kernel_params")
if kernel_params:
virt_install_cmd += " --extra-args '%s'" % kernel_params
virt_install_cmd += " --noautoconsole"
sec_type = params.get("sec_type", None)
if sec_type:
sec_label = params.get("sec_label", None)
sec_relabel = params.get("sec_relabel", None)
virt_install_cmd += add_security(help_text, sec_type=sec_type,
sec_label=sec_label,
sec_relabel=sec_relabel)
return virt_install_cmd
def get_serial_console_filename(self, name):
"""
Return the serial console filename.
:param name: The serial port name.
"""
return "serial-%s-%s-%s.log" % (name, self.name,
utils_misc.generate_random_string(4))
def get_serial_console_filenames(self):
"""
Return a list of all serial console filenames
(as specified in the VM's params).
"""
return [self.get_serial_console_filename(_) for _ in
self.params.objects("serials")]
    def create_serial_console(self):
        """
        Establish a session with the serial console.

        The libvirt version uses virsh console to manage it.  Populates
        self.serial_ports from params on first use; a session is only
        created when none exists yet (self.serial_console is None).
        """
        if not self.serial_ports:
            for serial in self.params.objects("serials"):
                self.serial_ports.append(serial)
        if self.serial_console is None:
            try:
                cmd = 'virsh'
                if self.connect_uri:
                    cmd += ' -c %s' % self.connect_uri
                # IndexError here means no serial port is configured at all.
                cmd += (" console %s %s" % (self.name, self.serial_ports[0]))
            except IndexError:
                raise virt_vm.VMConfigMissingError(self.name, "serial")
            output_func = utils_misc.log_line  # Because qemu-kvm uses this
            # Because qemu-kvm hard-codes this
            output_filename = self.get_serial_console_filename(self.serial_ports[0])
            output_params = (output_filename,)
            # NOTE(review): 'prompt' is computed but never used in this
            # method — confirm before removing.
            prompt = self.params.get("shell_prompt", "[\#\$]")
            self.serial_console = aexpect.ShellSession(command=cmd, auto_close=False,
                                                       output_func=output_func,
                                                       output_params=output_params)
            # Cause serial_console.close() to close open log file
            self.serial_console.set_log_file(output_filename)
            self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(),
                                                   output_filename)
def set_root_serial_console(self, device, remove=False):
"""
Allow or ban root to login through serial console.
:param device: device to set root login
:param allow_root: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
securetty_output = session.cmd_output("cat /etc/securetty")
devices = str(securetty_output).strip().splitlines()
if device not in devices:
if not remove:
session.sendline("echo %s >> /etc/securetty" % device)
else:
if remove:
session.sendline("sed -i -e /%s/d /etc/securetty"
% device)
logging.debug("Set root login for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set root login for %s failed.", device)
return False
def set_kernel_console(self, device, speed=None, remove=False):
"""
Set kernel parameter for given console device.
:param device: a console device
:param speed: speed of serial console
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
grub = "/boot/grub/grub.conf"
if not session.cmd_status("ls /boot/grub2/grub.cfg"):
grub = "/boot/grub2/grub.cfg"
kernel_params = "console=%s" % device
if speed is not None:
kernel_params += ",%s" % speed
output = session.cmd_output("cat %s" % grub)
if not re.search("console=%s" % device, output):
if not remove:
session.sendline("sed -i -e \'s/vmlinuz-.*/& %s/g\'"
" %s; sync" % (kernel_params, grub))
else:
if remove:
session.sendline("sed -i -e \'s/console=%s\w*\s//g\'"
" %s; sync" % (device, grub))
logging.debug("Set kernel params for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set kernel params for %s failed.", device)
return False
def set_kernel_param(self, parameter, value=None, remove=False):
"""
Set a specific kernel parameter.
:param option: A kernel parameter to set.
:param value: The value of the parameter to be set.
:param remove: Remove the parameter if True.
:return: True if succeed of False if failed.
"""
if self.is_dead():
logging.error("Can't set kernel param on a dead VM.")
return False
session = self.wait_for_login()
try:
grub_paths = [
'/etc/grub.conf',
'/etc/grub2.cfg',
'/boot/grub/grub.conf',
'/boot/grub2/grub.cfg',
]
grub_path = ''
for path in grub_paths:
if not session.cmd_status("ls %s" % path):
grub_path = path
break
if not grub_path:
logging.error("Failed to locate grub config file "
"in %s." % grub_paths)
return False
grub_text = session.cmd_output("cat %s" % grub_path)
kernel_lines = [l.strip() for l in grub_text.splitlines()
if re.match(r"\s*(linux|kernel).*", l)]
if not kernel_lines:
logging.error("Can't find any kernel lines in grub "
"file %s:\n%s" % (grub_path, grub_text))
return False
for line in kernel_lines:
line = line.replace('\t', r'\t')
if remove:
new_string = ""
else:
if value is None:
new_string = parameter
else:
new_string = "%s=%s" % (parameter, value)
patts = [
"\s+(%s=\S*)(\s|$)" % parameter,
"\s+(%s)(\s|$)" % parameter,
]
old_string = ""
for patt in patts:
res = re.search(patt, line)
if res:
old_string = res.group(1)
break
if old_string:
new_line = line.replace(old_string, new_string)
else:
new_line = " ".join((line, new_string))
line_patt = "\s*".join(line.split())
logging.debug("Substituting grub line '%s' to '%s'." %
(line, new_line))
stat_sed, output = session.cmd_status_output(
"sed -i --follow-symlinks -e \"s@%s@%s@g\" %s" %
(line_patt, new_line, grub_path))
if stat_sed:
logging.error("Failed to substitute grub file:\n%s" %
output)
return False
if remove:
logging.debug("Remove kernel params %s successfully.",
parameter)
else:
logging.debug("Set kernel params %s to %s successfully.",
parameter, value)
return True
finally:
session.close()
def has_swap(self):
"""
Check if there is any active swap partition/file.
:return : True if swap is on or False otherwise.
"""
if self.is_dead():
logging.error("Can't check swap on a dead VM.")
return False
session = self.wait_for_login()
try:
cmd = "swapon -s"
output = session.cmd_output(cmd)
if output.strip():
return True
return False
finally:
session.close()
def create_swap_partition(self, swap_path=None):
"""
Make a swap partition and active it.
A cleanup_swap() should be call after use to clean up
the environment changed.
:param swap_path: Swap image path.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
if not swap_path:
swap_path = os.path.join(data_dir.get_tmp_dir(), "swap_image")
swap_size = self.get_used_mem()
process.run("qemu-img create %s %s" % (swap_path, swap_size * 1024))
self.created_swap_path = swap_path
device = self.attach_disk(swap_path, extra="--persistent")
session = self.wait_for_login()
try:
dev_path = "/dev/" + device
session.cmd_status("mkswap %s" % dev_path)
session.cmd_status("swapon %s" % dev_path)
self.set_kernel_param("resume", dev_path)
return True
finally:
session.close()
logging.error("Failed to create a swap partition.")
return False
def create_swap_file(self, swapfile='/swapfile'):
"""
Make a swap file and active it through a session.
A cleanup_swap() should be call after use to clean up
the environment changed.
:param swapfile: Swap file path in VM to be created.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
session = self.wait_for_login()
try:
# Get memory size.
swap_size = self.get_used_mem() / 1024
# Create, change permission, and make a swap file.
cmd = ("dd if=/dev/zero of={1} bs=1M count={0} && "
"chmod 600 {1} && "
"mkswap {1}".format(swap_size, swapfile))
stat_create, output = session.cmd_status_output(cmd)
if stat_create:
logging.error("Fail to create swap file in guest."
"\n%s" % output)
return False
self.created_swap_file = swapfile
# Get physical swap file offset for kernel param resume_offset.
cmd = "filefrag -v %s" % swapfile
output = session.cmd_output(cmd)
# For compatibility of different version of filefrag
# Sample output of 'filefrag -v /swapfile'
# On newer version:
# Filesystem type is: 58465342
# File size of /swapfile is 1048576000 (256000 blocks of 4096 bytes)
# ext: logical_offset: physical_offset: length: expected: flags:
# 0: 0.. 65519: 395320.. 460839: 65520:
# ...
# On older version:
# Filesystem type is: ef53
# File size of /swapfile is 1048576000 (256000 blocks, blocksize 4096)
# ext logical physical expected length flags
# 0 0 2465792 32768
# ...
offset_line = output.splitlines()[3]
if '..' in offset_line:
offset = offset_line.split()[3].rstrip('..')
else:
offset = offset_line.split()[2]
# Get physical swap file device for kernel param resume.
cmd = "df %s" % swapfile
output = session.cmd_output(cmd)
# Sample output of 'df /swapfile':
# Filesystem 1K-blocks Used Available Use% Mounted on
#/dev/vdb 52403200 15513848 36889352 30% /
device = output.splitlines()[1].split()[0]
# Set kernel parameters.
self.set_kernel_param("resume", device)
self.set_kernel_param("resume_offset", offset)
finally:
session.close()
self.reboot()
session = self.wait_for_login()
try:
# Activate a swap file.
cmd = "swapon %s" % swapfile
stat_swapon, output = session.cmd_status_output(cmd)
if stat_create:
logging.error("Fail to activate swap file in guest."
"\n%s" % output)
return False
finally:
session.close()
if self.has_swap():
logging.debug("Successfully created swapfile %s." % swapfile)
return True
else:
logging.error("Failed to create swap file.")
return False
def cleanup_swap(self):
"""
Cleanup environment changed by create_swap_partition() or
create_swap_file().
"""
if self.is_dead():
logging.error("Can't cleanup swap on a dead VM.")
return False
# Remove kernel parameters.
self.set_kernel_param("resume", remove=True)
self.set_kernel_param("resume_offset", remove=True)
# Deactivate swap partition/file.
session = self.wait_for_login()
try:
session.cmd_status("swapoff -a")
if "created_swap_file" in dir(self):
session.cmd_status("rm -f %s" % self.created_swap_file)
del self.created_swap_file
finally:
session.close()
# Cold unplug attached swap disk
if self.shutdown():
if "created_swap_device" in dir(self):
self.detach_disk(
self.created_swap_device, extra="--persistent")
del self.created_swap_device
if "created_swap_path" in dir(self):
os.remove(self.created_swap_path)
del self.created_swap_path
def set_console_getty(self, device, getty="mgetty", remove=False):
"""
Set getty for given console device.
:param device: a console device
:param getty: getty type: agetty, mgetty and so on.
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
# Only configurate RHEL5 and below
regex = "gettys are handled by"
# As of RHEL7 systemd message is displayed
regex += "|inittab is no longer used when using systemd"
output = session.cmd_output("cat /etc/inittab")
if re.search(regex, output):
logging.debug("Skip setting inittab for %s", device)
return True
getty_str = "co:2345:respawn:/sbin/%s %s" % (getty, device)
matched_str = "respawn:/sbin/*getty %s" % device
if not re.search(matched_str, output):
if not remove:
session.sendline("echo %s >> /etc/inittab" % getty_str)
else:
if remove:
session.sendline("sed -i -e /%s/d "
"/etc/inittab" % matched_str)
logging.debug("Set inittab for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set inittab for %s failed.", device)
return False
def cleanup_serial_console(self):
    """
    Close serial console and associated log file
    """
    console = self.serial_console
    if console is not None:
        # LXC consoles need the escape sequence sent before closing.
        if self.is_lxc():
            console.sendline("^]")
        console.close()
        self.serial_console = None
        self.serial_console_log = None
    # Drop any leftover migration scratch file.
    if hasattr(self, "migration_file"):
        try:
            os.unlink(self.migration_file)
        except OSError:
            pass
def wait_for_login(self, nic_index=0, timeout=None,
                   internal_timeout=None,
                   serial=False, restart_network=False,
                   username=None, password=None):
    """
    Override the wait_for_login method of virt_vm to support other
    guest in libvirt.
    If connect_uri is lxc related, we call wait_for_serial_login()
    directly, without attempting login it via network.
    Other connect_uri, call virt_vm.wait_for_login().

    :param nic_index: NIC to log in through (ignored for LXC guests).
    :param timeout: overall login timeout; defaults to the base class
                    LOGIN_WAIT_TIMEOUT when not given.
    :param internal_timeout: per-attempt timeout; defaults to the base
                             class LOGIN_TIMEOUT.
    :param serial: request a serial login (ignored for LXC guests).
    """
    # Set the default value of parameters if user did not use it.
    if not timeout:
        timeout = super(VM, self).LOGIN_WAIT_TIMEOUT
    if not internal_timeout:
        internal_timeout = super(VM, self).LOGIN_TIMEOUT
    if self.is_lxc():
        # LXC guests are reached through a freshly created serial
        # console rather than the network.
        self.cleanup_serial_console()
        self.create_serial_console()
        return self.wait_for_serial_login(timeout, internal_timeout,
                                          restart_network,
                                          username, password)
    return super(VM, self).wait_for_login(nic_index, timeout,
                                          internal_timeout,
                                          serial, restart_network,
                                          username, password)
@error_context.context_aware
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
           migration_mode=None, mac_source=None, autoconsole=True):
    """
    Start the VM by running a qemu command.
    All parameters are optional. If name, params or root_dir are not
    supplied, the respective values stored as class attributes are used.

    :param name: The name of the object
    :param params: A dict containing VM params
    :param root_dir: Base directory for relative filenames
    :param migration_mode: If supplied, start VM for incoming migration
        using this protocol (either 'tcp', 'unix' or 'exec')
    :param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
        (e.g. 'gzip -c -d filename') if migration_mode is 'exec'
    :param mac_source: A VM object from which to copy MAC addresses. If not
        specified, new addresses will be generated.

    :raise VMCreateError: If qemu terminates unexpectedly
    :raise VMKVMInitError: If KVM initialization fails
    :raise VMHugePageError: If hugepage initialization fails
    :raise VMImageMissingError: If a CD image is missing
    :raise VMHashMismatchError: If a CD image hash has doesn't match the
        expected hash
    :raise VMBadPATypeError: If an unsupported PCI assignment type is
        requested
    :raise VMPAError: If no PCI assignable devices could be assigned

    NOTE(review): timeout, migration_mode and autoconsole are accepted
    for API compatibility but are not used anywhere in this body —
    confirm whether they should influence behavior.
    """
    error_context.context("creating '%s'" % self.name)
    # Tear down any previous incarnation but keep the MAC addresses,
    # since the same virtnet config is reused below.
    self.destroy(free_mac_addresses=False)
    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir
    # Verify the md5sum of the ISO images
    for cdrom in params.objects("cdroms"):
        if params.get("medium") == "import":
            break
        cdrom_params = params.object_params(cdrom)
        iso = cdrom_params.get("cdrom")
        xen_pv = (self.driver_type == 'xen' and
                  params.get('hvm_or_pv') == 'pv')
        # NOTE(review): iso may be None here and os.path.basename(None)
        # raises TypeError; the "if iso:" guard only comes later —
        # confirm every configured cdrom always has a "cdrom" param.
        iso_is_ks = os.path.basename(iso) == 'ks.iso'
        if xen_pv and iso_is_ks:
            continue
        if iso:
            iso = utils_misc.get_path(data_dir.get_data_dir(), iso)
            if not os.path.exists(iso):
                raise virt_vm.VMImageMissingError(iso)
            compare = False
            if cdrom_params.get("skip_hash"):
                logging.debug("Skipping hash comparison")
            elif cdrom_params.get("md5sum_1m"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "first MB of ISO file...")
                actual_hash = crypto.hash_file(
                    iso, 1048576, algorithm="md5")
                expected_hash = cdrom_params.get("md5sum_1m")
                compare = True
            elif cdrom_params.get("md5sum"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "ISO file...")
                actual_hash = crypto.hash_file(iso, algorithm="md5")
                expected_hash = cdrom_params.get("md5sum")
                compare = True
            elif cdrom_params.get("sha1sum"):
                logging.debug("Comparing expected SHA1 sum with SHA1 sum "
                              "of ISO file...")
                actual_hash = crypto.hash_file(iso, algorithm="sha1")
                expected_hash = cdrom_params.get("sha1sum")
                compare = True
            if compare:
                if actual_hash == expected_hash:
                    logging.debug("Hashes match")
                else:
                    raise virt_vm.VMHashMismatchError(actual_hash,
                                                      expected_hash)
    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/libvirt-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)
    try:
        # Handle port redirections
        redir_names = params.objects("redirs")
        host_ports = utils_misc.find_free_ports(
            5000, 6000, len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = params.object_params(redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]
        # Find available PCI devices
        self.pci_devices = []
        for device in params.objects("pci_devices"):
            self.pci_devices.append(device)
        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            if params.get("vnc_autoport") == "yes":
                self.vnc_port = None
                self.vnc_autoport = True
            else:
                self.vnc_port = utils_misc.find_free_port(5900, 6100)
                self.vnc_autoport = False
        # Find available spice port, if needed
        if params.get("spice"):
            self.spice_port = utils_misc.find_free_port(8000, 8100)
        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()
        # Generate or copy MAC addresses for all NICs
        for nic in self.virtnet:
            nic_params = dict(nic)
            if mac_source is not None:
                # Will raise exception if source doesn't
                # have corresponding nic
                logging.debug("Copying mac for nic %s from VM %s",
                              nic.nic_name, mac_source.name)
                nic_params['mac'] = mac_source.get_mac_address(
                    nic.nic_name)
            # make_create_command() calls vm.add_nic (i.e. on a copy)
            nic = self.add_nic(**nic_params)
            logging.debug('VM.create activating nic %s' % nic)
            self.activate_nic(nic.nic_name)
        # Make qemu command
        install_command = self.make_create_command()
        logging.info("Running libvirt command (reformatted):")
        for item in install_command.replace(" -", " \n    -").splitlines():
            logging.info("%s", item)
        try:
            process.run(install_command, verbose=True, shell=True)
        except process.CmdError, details:
            stderr = details.result.stderr.strip()
            # This is a common newcomer mistake, be more helpful...
            if stderr.count('IDE CDROM must use'):
                testname = params.get('name', "")
                if testname.count('unattended_install.cdrom'):
                    if not testname.count('http_ks'):
                        # NOTE(review): details.result_obj looks stale —
                        # every other access here uses details.result.
                        e_msg = ("Install command "
                                 "failed:\n%s \n\nNote: "
                                 "Older versions of "
                                 "libvirt won't work "
                                 "properly with kickstart "
                                 "on cdrom install. "
                                 "Try using the "
                                 "unattended_install.cdrom.http_ks method "
                                 "instead." % details.result_obj)
                        raise exceptions.TestNAError(e_msg)
            if stderr.count('failed to launch bridge helper'):
                if utils_selinux.is_enforcing():
                    raise exceptions.TestNAError("SELinux is enabled and "
                                                 "preventing the bridge "
                                                 "helper from accessing "
                                                 "the bridge. Consider "
                                                 "running as root or "
                                                 "placing SELinux into "
                                                 "permissive mode.")
            # some other problem happened, raise normally
            raise
        # Wait for the domain to be created
        utils_misc.wait_for(func=self.is_alive, timeout=60,
                            text=("waiting for domain %s to start" %
                                  self.name))
        self.uuid = virsh.domuuid(self.name,
                                  uri=self.connect_uri).stdout.strip()
        # Create isa serial ports.
        self.create_serial_console()
    finally:
        # Always release the inter-process lock, even on failure.
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
def migrate(self, dest_uri="", option="--live --timeout 60", extra="",
            ignore_status=False, debug=False):
    """
    Migrate a VM to a remote host.

    :param dest_uri: Destination libvirt URI
    :param option: Migration options before <domain> <desturi>
    :param extra: Migration options after <domain> <desturi>
    :param ignore_status: whether virsh failures should be tolerated
    :param debug: enable virsh debug output
    :return: the virsh CmdResult of the migrate command
    """
    logging.info("Migrating VM %s from %s to %s" %
                 (self.name, self.connect_uri, dest_uri))
    result = virsh.migrate(self.name, dest_uri, option,
                           extra, uri=self.connect_uri,
                           ignore_status=ignore_status,
                           debug=debug)
    # Close down serial_console logging process
    self.cleanup_serial_console()
    # On successful migration, point to guests new hypervisor.
    # Since dest_uri could be None, checking it is necessary.
    if result.exit_status == 0 and dest_uri:
        self.connect_uri = dest_uri
        # Re-attach the console on the destination hypervisor.
        self.create_serial_console()
    return result
def attach_disk(self, source, target=None, prefix="vd", extra="",
                ignore_status=False, debug=False):
    """
    Attach a disk to VM and return the target device name.

    :param source: source of disk device
    :param target: target of disk device, None for automatic assignment.
    :param prefix: disk device prefix.
    :param extra: additional arguments to command
    :return: target device name if successed
    """
    # Find the next available target device name.
    if target is None:
        disks = self.get_disk_devices()
        # NOTE(review): if all of <prefix>a..<prefix>z are in use the
        # loop falls through with the last (taken) name and the attach
        # will fail at the virsh level.
        for ch in string.ascii_lowercase:
            target = prefix + ch
            if target not in disks:
                break
    virsh.attach_disk(self.name, source, target, extra,
                      uri=self.connect_uri,
                      ignore_status=ignore_status,
                      debug=debug)
    return target
def detach_disk(self, target, extra="",
                ignore_status=False, debug=False):
    """
    Detach a disk from VM.

    :param target: target of disk device need to be detached.
    :param extra: additional arguments to command
    :return: the virsh CmdResult of the detach command
    """
    return virsh.detach_disk(self.name, target, extra,
                             uri=self.connect_uri,
                             ignore_status=ignore_status,
                             debug=debug)
def attach_interface(self, option="", ignore_status=False,
                     debug=False):
    """
    Attach a NIC to VM.

    :param option: full `virsh attach-interface` option string
    :return: the virsh CmdResult of the attach command
    """
    return virsh.attach_interface(self.name, option,
                                  uri=self.connect_uri,
                                  ignore_status=ignore_status,
                                  debug=debug)
def detach_interface(self, option="", ignore_status=False,
                     debug=False):
    """
    Detach a NIC from VM.

    :param option: full `virsh detach-interface` option string
    :return: the virsh CmdResult of the detach command
    """
    return virsh.detach_interface(self.name, option,
                                  uri=self.connect_uri,
                                  ignore_status=ignore_status,
                                  debug=debug)
def destroy(self, gracefully=True, free_mac_addresses=True):
    """
    Destroy the VM.
    If gracefully is True, first attempt to shutdown the VM with a shell
    command. If that fails, send SIGKILL to the qemu process.

    :param gracefully: If True, an attempt will be made to end the VM
        using a shell command before trying to end the qemu process
        with a 'quit' or a kill signal.
    :param free_mac_addresses: If vm is undefined with libvirt, also
        release/reset associated mac address
    """
    try:
        # Is it already dead?
        if self.is_alive():
            logging.debug("Destroying VM")
            # A paused domain must be resumed before an in-guest
            # shutdown command can work.
            if self.is_paused():
                self.resume()
            if (not self.is_lxc() and gracefully and
                    self.params.get("shutdown_command")):
                # Try to destroy with shell command
                logging.debug("Trying to shutdown VM with shell command")
                try:
                    session = self.login()
                except (remote.LoginError, virt_vm.VMError), e:
                    logging.debug(e)
                else:
                    try:
                        # Send the shutdown command
                        session.sendline(
                            self.params.get("shutdown_command"))
                        logging.debug("Shutdown command sent; waiting for VM "
                                      "to go down...")
                        if utils_misc.wait_for(self.is_dead, 60, 1, 1):
                            logging.debug("VM is down")
                            # Graceful path done; finally still runs.
                            return
                    finally:
                        session.close()
        # Destroy VM directly, as 'ignore_status=True' by default, so destroy
        # a shutoff domain is also acceptable here.
        destroy_opt = ''
        if gracefully:
            destroy_opt = '--graceful'
        virsh.destroy(self.name, destroy_opt, uri=self.connect_uri)
    finally:
        self.cleanup_serial_console()
    if free_mac_addresses:
        # Persistent domains keep their definition (and MACs); only
        # transient ones release their addresses here.
        if self.is_persistent():
            logging.warning("Requested MAC address release from "
                            "persistent vm %s. Ignoring." % self.name)
        else:
            logging.debug("Releasing MAC addresses for vm %s." % self.name)
            for nic_name in self.virtnet.nic_name_list():
                self.virtnet.free_mac_address(nic_name)
def remove(self):
    """
    Undefine the domain, destroying it first (gracefully, then hard).

    :raise VMRemoveError: if the undefine operation fails.
    """
    self.destroy(gracefully=True, free_mac_addresses=False)
    if not self.undefine():
        raise virt_vm.VMRemoveError("VM '%s' undefine error" % self.name)
    # Second destroy kills anything left and releases the MACs now
    # that the definition is gone.
    self.destroy(gracefully=False, free_mac_addresses=True)
    logging.debug("VM '%s' was removed", self.name)
def remove_with_storage(self):
    """
    Virsh undefine provides an option named --remove-all-storage, but it
    only removes the storage which is managed by libvirt.
    This method undefines vm and removes the all storages related with this
    vm, no matter storages are managed by libvirt or not.
    """
    # Snapshot the disk list before the definition disappears.
    blklist = self.get_disk_devices().values()
    self.remove()
    for blk in blklist:
        path = blk['source']
        if os.path.exists(path):
            os.remove(path)
def get_uuid(self):
    """
    Return VM's UUID.

    Queries virsh but caches the first value seen in self.uuid.
    """
    uuid = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip()
    # only overwrite it if it's not set
    if self.uuid is None:
        self.uuid = uuid
    return self.uuid
def get_ifname(self, nic_index=0):
    # Host-side tap interface name lookup is not implemented for the
    # libvirt backend.
    raise NotImplementedError
def get_virsh_mac_address(self, nic_index=0):
    """
    Get the MAC of this VM domain.

    :param nic_index: Index of the NIC
    :raise VMMACAddressMissingError: If no MAC address is defined for the
        requested NIC
    :raise TestFail: if `virsh dumpxml` itself fails.
    """
    cmd_result = virsh.dumpxml(self.name, uri=self.connect_uri)
    if cmd_result.exit_status:
        raise exceptions.TestFail("dumpxml %s failed.\n"
                                  "Detail: %s.\n" % (self.name, cmd_result))
    thexml = cmd_result.stdout.strip()
    xtf = xml_utils.XMLTreeFile(thexml)
    interfaces = xtf.find('devices').findall('interface')
    # Range check
    try:
        mac = interfaces[nic_index].find('mac').get('address')
        if mac is not None:
            return mac
    except IndexError:
        pass  # Allow other exceptions through
    # IndexError (range check) or mac is None
    raise virt_vm.VMMACAddressMissingError(nic_index)
def get_pid(self):
    """
    Return the VM's PID.

    The PID is read from the hypervisor-driver-specific pidfile under
    /var/run/libvirt.

    :return: int with PID. If VM is not alive, returns None.
    :raise ValueError: if the connect URI maps to no known driver.
    """
    if self.is_lxc():
        pid_file = "/var/run/libvirt/lxc/%s.pid" % self.name
    elif self.is_qemu():
        pid_file = "/var/run/libvirt/qemu/%s.pid" % self.name
    elif self.is_esx():
        pid_file = "/var/run/libvirt/esx/%s.pid" % self.name
    # TODO: Add more vm driver type
    else:
        raise ValueError("Unsupport connect uri: %s." % self.connect_uri)
    pid = None
    if os.path.exists(pid_file):
        try:
            # Close the handle deterministically instead of leaking it
            # until garbage collection.
            pid_file_obj = open(pid_file)
            try:
                pid_file_contents = pid_file_obj.read()
            finally:
                pid_file_obj.close()
            pid = int(pid_file_contents)
        except IOError:
            logging.error("Could not read %s to get PID", pid_file)
        except ValueError:
            # int() raises ValueError (not TypeError, which the old
            # code caught) when the file contains garbage.
            logging.error("PID file %s has invalid contents: '%s'",
                          pid_file, pid_file_contents)
    else:
        logging.debug("PID file %s not present", pid_file)
    return pid
def get_vcpus_pid(self):
    """
    Return the vcpu's pid for a given VM.

    Parses `info cpus` output from the QEMU human monitor.

    :return: list of PID of vcpus of a VM (as strings).
    """
    output = virsh.qemu_monitor_command(self.name, "info cpus", "--hmp",
                                        uri=self.connect_uri)
    # Each vcpu line carries a "thread_id=<pid>" token.
    vcpu_pids = re.findall(r'thread_id=(\d+)', output.stdout)
    return vcpu_pids
def get_shell_pid(self):
    """
    Return the PID of the parent shell process.

    :note: This works under the assumption that ``self.process.get_pid()``
           returns the PID of the parent shell process.
    """
    return self.process.get_pid()
def get_shared_meminfo(self):
    """
    Returns the VM's shared memory information.

    :return: Shared memory used by VM (MB), or None if the VM is dead.
    """
    if self.is_dead():
        logging.error("Could not get shared memory info from dead VM.")
        return None
    filename = "/proc/%d/statm" % self.get_pid()
    # Field index 2 of statm is the resident shared page count; close
    # the file explicitly instead of leaking the handle.
    statm_file = open(filename)
    try:
        shm = int(statm_file.read().split()[2])
    finally:
        statm_file.close()
    # statm stores informations in pages, translate it to MB
    # NOTE(review): assumes 4 KiB pages — confirm on hosts with a
    # different page size (e.g. 64 KiB ppc64).
    return shm * 4.0 / 1024
def get_cpu_topology_in_cmdline(self):
    """
    Return the VM's cpu topology as found in the process cmdline.

    :return: A dict with keys 'sockets', 'cores' and 'threads' (string
             values); empty when the PID or the topology specification
             is unavailable.
    """
    cpu_topology = {}
    vm_pid = self.get_pid()
    if vm_pid is None:
        logging.error("Fail to get VM pid")
    else:
        # Read and close the cmdline file explicitly.
        cmdline_file = open("/proc/%d/cmdline" % vm_pid)
        try:
            cmdline = cmdline_file.read()
        finally:
            cmdline_file.close()
        match = re.search(r"sockets=(\d+),cores=(\d+),threads=(\d+)",
                          cmdline)
        if match:
            cpu_topology = dict(zip(["sockets", "cores", "threads"],
                                    match.groups()))
        else:
            # Previously re.findall(...)[0] raised IndexError here;
            # report a missing topology instead of crashing.
            logging.error("Could not find cpu topology in cmdline of "
                          "pid %s", vm_pid)
    return cpu_topology
def get_cpu_topology_in_vm(self):
    """
    Return the cpu topology as reported by lscpu inside the guest.

    :return: dict with 'sockets', 'cores' and 'threads' keys, or an
             empty dict when cpu info could not be collected.
    """
    cpu_topology = {}
    cpu_info = utils_misc.get_cpu_info(self.wait_for_login())
    if cpu_info:
        cpu_topology['sockets'] = cpu_info['Socket(s)']
        cpu_topology['cores'] = cpu_info['Core(s) per socket']
        cpu_topology['threads'] = cpu_info['Thread(s) per core']
    return cpu_topology
def activate_nic(self, nic_index_or_name):
    """No-op placeholder: NIC hotplug is not implemented yet."""
    # TODO: Implement nic hotplugging
    pass  # Just a stub for now
def deactivate_nic(self, nic_index_or_name):
    """No-op placeholder: NIC hot unplug is not implemented yet."""
    # TODO: Implement nic hot un-plugging
    pass  # Just a stub for now
@error_context.context_aware
def reboot(self, session=None, method="shell", nic_index=0, timeout=240,
           serial=False):
    """
    Reboot the VM and wait for it to come back up by trying to log in until
    timeout expires.

    :param session: A shell session object or None.
    :param method: Reboot method. Can be "shell" (send a shell reboot
                   command).
    :param nic_index: Index of NIC to access in the VM, when logging in
                      after rebooting.
    :param timeout: Time to wait for login to succeed (after rebooting).
    :param serial: Just use to unify api in virt_vm module.
    :return: A new shell session object.
    :raise VMRebootError: on an unknown method or if the guest never
                          goes down.
    """
    error_context.base_context("rebooting '%s'" % self.name, logging.info)
    error_context.context("before reboot")
    session = session or self.login(timeout=timeout)
    error_context.context()
    if method == "shell":
        session.sendline(self.params.get("reboot_command"))
    else:
        raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
    error_context.context("waiting for guest to go down", logging.info)
    # The session must stop responding before we consider the reboot
    # actually started.
    if not utils_misc.wait_for(lambda: not
                               session.is_responsive(timeout=30),
                               120, 0, 1):
        raise virt_vm.VMRebootError("Guest refuses to go down")
    session.close()
    error_context.context("logging in after reboot", logging.info)
    return self.wait_for_login(nic_index, timeout=timeout)
def screendump(self, filename, debug=False):
    """
    Take a screenshot of the domain into `filename` via virsh.

    :param filename: destination path for the screenshot
    :param debug: when True, log the request before taking it
    :return: the virsh CmdResult of the screenshot command
    """
    if debug:
        logging.debug("Requesting screenshot %s" % filename)
    return virsh.screenshot(self.name, filename, uri=self.connect_uri)
def start(self, autoconsole=True):
    """
    Starts this VM.

    :param autoconsole: when True, attach a serial console after the
                        domain becomes active.
    :raise VMStartError: when virsh start fails or the domain never
                         becomes active.
    """
    self.uuid = virsh.domuuid(self.name,
                              uri=self.connect_uri).stdout.strip()
    logging.debug("Starting vm '%s'", self.name)
    result = virsh.start(self.name, uri=self.connect_uri)
    if not result.exit_status:
        # Wait for the domain to be created
        has_started = utils_misc.wait_for(func=self.is_alive, timeout=60,
                                          text=("waiting for domain %s "
                                                "to start" % self.name))
        if has_started is None:
            raise virt_vm.VMStartError(self.name, "libvirt domain not "
                                                  "active after start")
        self.uuid = virsh.domuuid(self.name,
                                  uri=self.connect_uri).stdout.strip()
        # Establish a session with the serial console
        if autoconsole:
            self.create_serial_console()
    else:
        raise virt_vm.VMStartError(self.name, result.stderr.strip())
    # Pull in mac addresses from libvirt guest definition
    for index, nic in enumerate(self.virtnet):
        try:
            mac = self.get_virsh_mac_address(index)
            if not nic.has_key('mac'):
                logging.debug("Updating nic %d with mac %s on vm %s"
                              % (index, mac, self.name))
                nic.mac = mac
            elif nic.mac != mac:
                # The test requested a MAC that libvirt did not apply;
                # warn but keep going.
                logging.warning("Requested mac %s doesn't match mac %s "
                                "as defined for vm %s", nic.mac, mac,
                                self.name)
            # TODO: Checkout/Set nic_model, nettype, netdst also
        except virt_vm.VMMACAddressMissingError:
            logging.warning("Nic %d requested by test but not defined for"
                            " vm %s" % (index, self.name))
def wait_for_shutdown(self, count=60):
    """
    Return True on successful domain shutdown.

    Wait for a domain to shutdown, libvirt does not block on domain
    shutdown so we need to watch for successful completion.

    :param count: timeout in seconds; domain state is polled every 5s.
    :return: True if the domain died within the timeout, else False.
    """
    timeout = count
    while count > 0:
        # check every 5 seconds
        if count % 5 == 0:
            if virsh.is_dead(self.name, uri=self.connect_uri):
                logging.debug("Shutdown took %d seconds", timeout - count)
                return True
        count -= 1
        time.sleep(1)
        logging.debug("Waiting for guest to shutdown %d", count)
    return False
def shutdown(self):
    """
    Shuts down this VM.

    :return: True when the domain shut down (or was already off and the
             shutdown completed), False on failure.
    """
    try:
        if self.state() != 'shut off':
            virsh.shutdown(self.name, uri=self.connect_uri)
        if self.wait_for_shutdown():
            logging.debug("VM %s shut down", self.name)
            # Drop the console; the domain is gone.
            self.cleanup_serial_console()
            return True
        else:
            logging.error("VM %s failed to shut down", self.name)
            return False
    except process.CmdError:
        logging.error("VM %s failed to shut down", self.name)
        return False
def pause(self):
    """
    Suspend the domain via `virsh suspend`.

    :return: True if the domain is paused (or already was), False on
             any error.
    """
    try:
        state = self.state()
        if state != 'paused':
            # Fixed keyword typo: was 'ignore_statues', which virsh
            # would reject as an unknown argument.
            virsh.suspend(
                self.name, uri=self.connect_uri, ignore_status=False)
        return True
    except Exception:
        # Narrowed from a bare except:; still logs and reports failure.
        logging.error("VM %s failed to suspend", self.name)
        return False
def resume(self):
    """
    Resume a suspended domain via `virsh resume`.

    :return: True when the domain is alive afterwards, False otherwise.
    """
    try:
        virsh.resume(self.name, ignore_status=False, uri=self.connect_uri)
        if self.is_alive():
            logging.debug("Resumed VM %s", self.name)
            return True
        else:
            return False
    except process.CmdError, detail:
        logging.error("Resume VM %s failed:\n%s", self.name, detail)
        return False
def save_to_file(self, path):
    """
    Override BaseVM save_to_file method

    Saves the domain state to `path` via `virsh save`; the domain is
    shut off afterwards.

    :param path: host file to save the domain state to
    :raise VMStatusError: if the VM is dead, or still alive after save
    :raise VMError: if the virsh save command fails
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot save a VM that is %s" % self.state())
    logging.debug("Saving VM %s to %s" % (self.name, path))
    result = virsh.save(self.name, path, uri=self.connect_uri)
    if result.exit_status:
        raise virt_vm.VMError("Save VM to %s failed.\n"
                              "Detail: %s." % (path, result.stderr))
    if self.is_alive():
        raise virt_vm.VMStatusError("VM not shut off after save")
    # The domain is off; release its console.
    self.cleanup_serial_console()
def restore_from_file(self, path):
    """
    Override BaseVM restore_from_file method

    Restores the domain from a state file via `virsh restore`.

    :param path: host file previously produced by save_to_file()
    :raise VMStatusError: if the VM is already alive, or still dead
                          after restore
    :raise VMError: if the virsh restore command fails
    """
    if self.is_alive():
        raise virt_vm.VMStatusError(
            "Can not restore VM that is %s" % self.state())
    logging.debug("Restoring VM from %s" % path)
    result = virsh.restore(path, uri=self.connect_uri)
    if result.exit_status:
        raise virt_vm.VMError("Restore VM from %s failed.\n"
                              "Detail: %s." % (path, result.stderr))
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "VM should not be %s after restore." % self.state())
    # The domain is running again; re-attach the console.
    self.create_serial_console()
def managedsave(self):
    """
    Managed save of VM's state

    Uses `virsh managedsave`, which stores the state in a
    libvirt-managed location; the domain is shut off afterwards.

    :raise VMStatusError: if the VM is dead, or alive after the save
    :raise VMError: if the virsh command fails
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot save a VM that is %s" % self.state())
    logging.debug("Managed saving VM %s" % self.name)
    result = virsh.managedsave(self.name, uri=self.connect_uri)
    if result.exit_status:
        raise virt_vm.VMError("Managed save VM failed.\n"
                              "Detail: %s." % result.stderr)
    if self.is_alive():
        raise virt_vm.VMStatusError("VM not shut off after managed save")
    self.cleanup_serial_console()
def pmsuspend(self, target='mem', duration=0):
    """
    Suspend a domain gracefully using power management functions

    :param target: suspend target ('mem', 'disk' or 'hybrid')
    :param duration: duration in seconds before waking up, 0 = forever
    :raise VMStatusError: if the VM is dead
    :raise VMError: if the virsh command fails
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot pmsuspend a VM that is %s" % self.state())
    logging.debug("PM suspending VM %s" % self.name)
    result = virsh.dompmsuspend(self.name, target=target,
                                duration=duration, uri=self.connect_uri)
    if result.exit_status:
        raise virt_vm.VMError("PM suspending VM failed.\n"
                              "Detail: %s." % result.stderr)
    self.cleanup_serial_console()
def pmwakeup(self):
    """
    Wakeup a domain from pmsuspended state

    :raise VMStatusError: if the VM is dead
    :raise VMError: if the virsh command fails
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot pmwakeup a VM that is %s" % self.state())
    logging.debug("PM waking up VM %s" % self.name)
    result = virsh.dompmwakeup(self.name, uri=self.connect_uri)
    if result.exit_status:
        raise virt_vm.VMError("PM waking up VM failed.\n"
                              "Detail: %s." % result.stderr)
    # The domain is running again; re-attach the console.
    self.create_serial_console()
def vcpupin(self, vcpu, cpu_list, options=""):
    """
    To pin vcpu to cpu_list

    :param vcpu: vcpu number to pin
    :param cpu_list: host cpu list/range specification
    :param options: extra virsh vcpupin options
    :raise TestFail: if the virsh command fails
    """
    result = virsh.vcpupin(self.name, vcpu, cpu_list,
                           options, uri=self.connect_uri)
    if result.exit_status:
        raise exceptions.TestFail("Virsh vcpupin command failed.\n"
                                  "Detail: %s.\n" % result)
def dominfo(self):
    """
    Return a dict include vm's information.
    """
    output = virsh.dominfo(self.name, uri=self.connect_uri).stdout.strip()
    # Each line looks like "Key: value". On lines with several colons
    # keep the first field as the key and the last field as the value.
    info = {}
    for line in output.splitlines():
        fields = line.split(':')
        info[fields[0].strip()] = fields[-1].strip()
    return info
def vcpuinfo(self):
    """
    Return a dict's list include vm's vcpu information.

    Parses `virsh vcpuinfo` output; one dict is produced per vcpu
    stanza.
    """
    output = virsh.vcpuinfo(self.name,
                            uri=self.connect_uri).stdout.strip()
    # Key: word before ':' | value: content after ':' (stripped)
    vcpuinfo_list = []
    vcpuinfo_dict = {}
    for line in output.splitlines():
        key = line.split(':')[0].strip()
        value = line.split(':')[-1].strip()
        vcpuinfo_dict[key] = value
        if key == "CPU Affinity":
            # "CPU Affinity" closes a vcpu stanza. Start a fresh dict
            # for the next vcpu; previously the same dict object was
            # appended every time, so all entries aliased the last
            # vcpu's data.
            vcpuinfo_list.append(vcpuinfo_dict)
            vcpuinfo_dict = {}
    return vcpuinfo_list
def get_used_mem(self):
    """
    Get vm's current memory(kilobytes).
    """
    # dominfo reports e.g. "Used memory: 1048576 KiB"; keep the number.
    used_field = self.dominfo()['Used memory']
    return int(used_field.split(' ')[0])
def get_blk_devices(self):
    """
    Get vm's block devices.
    Return a dict include all devices detail info.
    example:
    {target: {'type': value, 'device': value, 'source': value}}

    An empty dict is returned when `virsh domblklist` fails.
    """
    domblkdict = {}
    options = "--details"
    result = virsh.domblklist(self.name, options, ignore_status=True,
                              uri=self.connect_uri)
    blklist = result.stdout.strip().splitlines()
    if result.exit_status != 0:
        logging.info("Get vm devices failed.")
    else:
        # Skip the two header lines of the --details table, then parse
        # the 4 columns: Type / Device / Target / Source.
        blklist = blklist[2:]
        for line in blklist:
            linesplit = line.split(None, 4)
            target = linesplit[2]
            blk_detail = {'type': linesplit[0],
                          'device': linesplit[1],
                          'source': linesplit[3]}
            domblkdict[target] = blk_detail
    return domblkdict
def get_disk_devices(self):
    """
    Get vm's disk type block devices.
    """
    # Keep only entries whose 'device' field is "disk" (this filters
    # out cdrom, floppy, lun, ... entries).
    return dict((target, details)
                for target, details in self.get_blk_devices().items()
                if details['device'] == "disk")
def get_first_disk_devices(self):
    """
    Get vm's first disk type block devices.

    :return: dict with 'type', 'device', 'target' and 'source' keys,
             or an empty dict if `virsh domblklist` fails.
    """
    disk = {}
    options = "--details"
    result = virsh.domblklist(self.name, options, ignore_status=True,
                              uri=self.connect_uri)
    blklist = result.stdout.strip().splitlines()
    if result.exit_status != 0:
        logging.info("Get vm devices failed.")
    else:
        # Skip the two header lines; take the first data row.
        # NOTE(review): raises IndexError when the domain has no block
        # devices at all — confirm callers guarantee at least one.
        blklist = blklist[2:]
        linesplit = blklist[0].split(None, 4)
        disk = {'type': linesplit[0],
                'device': linesplit[1],
                'target': linesplit[2],
                'source': linesplit[3]}
    return disk
def get_device_details(self, device_target):
    """
    Return `virsh domblkinfo` output for a device as a dict
    (e.g. keys 'Capacity', 'Allocation', 'Physical'); empty on failure.

    :param device_target: target name of the block device (e.g. 'vda')
    """
    device_details = {}
    result = virsh.domblkinfo(self.name, device_target,
                              uri=self.connect_uri)
    details = result.stdout.strip().splitlines()
    if result.exit_status != 0:
        logging.info("Get vm device details failed.")
    else:
        for line in details:
            # "Key: value" per line; the last ':'-field is the value.
            attrs = line.split(":")
            device_details[attrs[0].strip()] = attrs[-1].strip()
    return device_details
def get_device_size(self, device_target):
    """
    Return (source path, capacity) for a block device target, or None
    when the target is not attached to this VM.
    """
    blk_devices = self.get_blk_devices()
    if device_target not in blk_devices:
        return None
    source_path = blk_devices[device_target]["source"]
    capacity = self.get_device_details(device_target)["Capacity"]
    return source_path, capacity
def get_max_mem(self):
    """
    Get vm's maximum memory(kilobytes).
    """
    # dominfo reports e.g. "Max memory: 4194304 KiB"; strip the unit.
    max_mem_field = self.dominfo()['Max memory']
    return int(max_mem_field.split(' ')[0])
def domjobabort(self):
    """
    Abort the currently running job for this vm.

    :return: True on success, False otherwise.
    """
    # Pass the connect URI so remote domains are targeted like in
    # every other virsh wrapper of this class (previously this always
    # used the default local connection).
    result = virsh.domjobabort(self.name, uri=self.connect_uri,
                               ignore_status=True)
    if result.exit_status:
        logging.debug(result)
        return False
    return True
def dump(self, path, option=""):
    """
    Dump self to path.

    :param path: destination file for the core dump
    :param option: extra `virsh dump` options
    :raise: exceptions.TestFail if dump fail.
    """
    cmd_result = virsh.dump(self.name, path=path, option=option,
                            uri=self.connect_uri)
    if cmd_result.exit_status:
        raise exceptions.TestFail("Failed to dump %s to %s.\n"
                                  "Detail: %s." % (self.name, path, cmd_result))
def get_job_type(self):
    """
    Return the current job type parsed from `virsh domjobinfo`.

    :return: job-type string on success, False when virsh fails or no
             "type" line is present.
    """
    jobresult = virsh.domjobinfo(self.name, uri=self.connect_uri)
    if not jobresult.exit_status:
        for line in jobresult.stdout.splitlines():
            key = line.split(':')[0]
            value = line.split(':')[-1]
            # The line is typically "Job type:    <value>".
            if key.count("type"):
                return value.strip()
    else:
        logging.error(jobresult)
    return False
def get_pci_devices(self, device_str=None):
    """
    Get PCI devices in vm accroding to given device character.

    :param device_str: a string to identify device.
    :return: list of PCI addresses (first lspci column, e.g.
             '0000:00:01.0') of the matching devices.
    """
    session = self.wait_for_login()
    if device_str is None:
        cmd = "lspci -D"
    else:
        cmd = "lspci -D | grep %s" % device_str
    lines = session.cmd_output(cmd)
    session.close()
    pci_devices = []
    for line in lines.splitlines():
        # First whitespace-separated field is the PCI domain/bus addr.
        pci_devices.append(line.split()[0])
    return pci_devices
def get_disks(self, diskname=None):
    """
    Get disks in vm.

    :param diskname: Specify disk to be listed,
                     used for checking given disk.
    :return: list of device paths like '/dev/vda' whose lsblk type
             column is 'disk'.
    """
    cmd = "lsblk --nodeps -n"
    if diskname:
        cmd += " | grep %s" % diskname
    session = self.wait_for_login()
    lines = session.cmd_output(cmd)
    session.close()
    disks = []
    for line in lines.splitlines():
        # lsblk -n rows end with the device type; only keep 'disk'
        # rows (skips partitions, roms, loop devices, ...).
        if line.count(" disk "):
            disks.append("/dev/%s" % line.split()[0])
    return disks
def get_interfaces(self):
    """
    Get available interfaces in vm.

    Parses /proc/net/dev inside the guest.

    :return: list of interface names (e.g. ['lo', 'eth0']).
    """
    cmd = "cat /proc/net/dev"
    session = self.wait_for_login()
    lines = session.cmd_output(cmd)
    session.close()
    interfaces = []
    for line in lines.splitlines():
        # Data rows look like "eth0: <counters>"; header lines contain
        # either no colon or several, so they are skipped here.
        if len(line.split(':')) != 2:
            continue
        interfaces.append(line.split(':')[0].strip())
    return interfaces
def get_interface_mac(self, interface):
    """
    Get mac address of interface by given name.

    :param interface: guest interface name (e.g. 'eth0')
    :return: MAC address string, or None when the interface does not
             exist or reading sysfs fails.
    """
    if interface not in self.get_interfaces():
        return None
    cmd = "cat /sys/class/net/%s/address" % interface
    session = self.wait_for_login()
    try:
        mac = session.cmd_output(cmd)
    except Exception, detail:
        session.close()
        logging.error(str(detail))
        return None
    session.close()
    return mac.strip()
def install_package(self, name):
    """
    Install a package on VM.
    ToDo: Support multiple package manager.

    :param name: Name of package to be installed
    :raise VMError: when the install or the post-install check fails
    """
    session = self.wait_for_login()
    try:
        # Install the package if it does not exists
        cmd = "rpm -q %s || yum install -y %s" % (name, name)
        status, output = session.cmd_status_output(cmd, timeout=300)
        # Just check status is not enough
        # It's necessary to check if install successfully
        if status != 0 or session.cmd_status("rpm -q %s" % name) != 0:
            raise virt_vm.VMError("Installation of package %s failed:\n%s" %
                                  (name, output))
    finally:
        session.close()
def remove_package(self, name):
    """
    Remove a package from VM.
    ToDo: Support multiple package manager.

    :param name: Name of package to be removed
    :raise VMError: when the removal command fails
    """
    session = self.wait_for_login()
    try:
        # Remove the package if it exists
        cmd = "! rpm -q %s || rpm -e %s" % (name, name)
        status, output = session.cmd_status_output(cmd, timeout=300)
        if status != 0:
            raise virt_vm.VMError("Removal of package %s failed:\n%s" %
                                  (name, output))
    finally:
        session.close()
def prepare_guest_agent(self, prepare_xml=True, channel=True, start=True):
    """
    Prepare qemu guest agent on the VM.

    :param prepare_xml: Whether change VM's XML
    :param channel: Whether add agent channel in VM. Only valid if
                    prepare_xml is True
    :param start: Whether install and start the qemu-ga service
    :raise VMError: when starting/stopping the agent fails, or when the
                    expected agent state is not reached within 60s
    """
    if prepare_xml:
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(self.name)
        # Check if we need to change XML of VM.
        if channel != bool(vmxml.get_agent_channels()):
            # The domain must be off to sync a changed definition.
            if self.is_alive():
                self.destroy()
            if channel:
                vmxml.set_agent_channel()
            else:
                vmxml.remove_agent_channels()
            vmxml.sync()
    if not self.is_alive():
        self.start()
    self.install_package('qemu-guest-agent')
    session = self.wait_for_login()

    def _is_ga_running():
        # Agent process present in the guest.
        return (not session.cmd_status("pgrep qemu-ga"))

    def _is_ga_finished():
        # pgrep exits 1 when no process matched.
        return (session.cmd_status("pgrep qemu-ga") == 1)

    def _start_ga():
        if not _is_ga_running():
            cmd = "service qemu-guest-agent start"
            status, output = session.cmd_status_output(cmd)
            # Sometimes the binary of the guest agent was corrupted on the
            # filesystem due to the guest being destroyed and cause service
            # masked, so need to reinstall agent to fix it
            if status and "is masked" in output:
                self.remove_package('qemu-guest-agent')
                self.install_package('qemu-guest-agent')
                status, output = session.cmd_status_output(cmd)
            if status and "unrecognized service" in output:
                # Older guests name the service 'qemu-ga'.
                cmd = "service qemu-ga start"
                status, output = session.cmd_status_output(cmd)
            if status:
                raise virt_vm.VMError("Start qemu-guest-agent failed:"
                                      "\n%s" % output)

    def _stop_ga():
        if _is_ga_running():
            cmd = "service qemu-guest-agent stop"
            status, output = session.cmd_status_output(cmd)
            if status and "unrecognized service" in output:
                cmd = "service qemu-ga stop"
                status, output = session.cmd_status_output(cmd)
            if status:
                raise virt_vm.VMError("Stop qemu-guest-agent failed:"
                                      "\n%s" % output)

    try:
        # Start/stop qemu-guest-agent
        if start:
            _start_ga()
        else:
            _stop_ga()
        # Check qemu-guest-agent status
        if start:
            if not utils_misc.wait_for(_is_ga_running, timeout=60):
                raise virt_vm.VMError("qemu-guest-agent is not running.")
        else:
            if not utils_misc.wait_for(_is_ga_finished, timeout=60):
                raise virt_vm.VMError("qemu-guest-agent is running")
    finally:
        session.close()
def getenforce(self):
    """
    Get SELinux mode of the VM.

    :return: SELinux mode [Enforcing|Permissive|Disabled]
    :raise VMError: if the getenforce command fails in the guest
    """
    # getenforce lives in libselinux-utils; make sure it is present.
    self.install_package('libselinux-utils')
    session = self.wait_for_login()
    try:
        status, output = session.cmd_status_output("getenforce")
        if status != 0:
            raise virt_vm.VMError("Get SELinux mode failed:\n%s" % output)
        return output.strip()
    finally:
        session.close()
def setenforce(self, mode):
"""
Set SELinux mode in the VM.
:param mode: SELinux mode [Enforcing|Permissive|1|0]
"""
self.install_package('libselinux-utils')
try:
if int(mode) == 1:
target_mode = 'Enforcing'
elif int(mode) == 0:
target_mode = 'Permissive'
except ValueError:
pass
session = self.wait_for_login()
try:
current_mode = self.getenforce()
if current_mode == 'Disabled':
logging.warning("VM SELinux disabled. Can't set mode.")
return
elif current_mode != target_mode:
cmd = "setenforce %s" % mode
status, output = session.cmd_status_output(cmd)
if status != 0:
raise virt_vm.VMError(
"Set SELinux mode failed:\n%s" % output)
else:
logging.debug("VM SELinux mode don't need change.")
finally:
session.close()
| gpl-2.0 |
narasimhan-v/avocado-misc-tests-1 | generic/htx_test.py | 4 | 7655 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
# Copyright: 2017 IBM
# Author:Praveen K Pandey <praveen@linux.vnet.ibm.com>
#
"""
HTX Test
"""
import os
import time
import shutil
from avocado import Test
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import build
from avocado.utils import process, archive
from avocado.utils import distro
class HtxTest(Test):
    """
    HTX [Hardware Test eXecutive] is a test tool suite. The goal of HTX is to
    stress test the system by exercising all hardware components concurrently
    in order to uncover any hardware design flaws and hardware-hardware or
    hardware-software interaction issues.
    :see:https://github.com/open-power/HTX.git
    :param mdt_file: mdt file used to trigger HTX
    :params time_limit: how much time(hours) you want to run this stress.
    """
    def setUp(self):
        """
        Setup: validate the platform, read parameters, and (for the start
        phase only) build/install HTX so the configured MDT file exists.
        """
        # HTX is only supported on IBM Power hardware.
        if 'ppc64' not in distro.detect().arch:
            self.cancel("Supported only on Power Architecture")
        self.mdt_file = self.params.get('mdt_file', default='mdt.mem')
        self.time_limit = int(self.params.get('time_limit', default=2))
        self.time_unit = self.params.get('time_unit', default='m')
        self.run_type = self.params.get('run_type', default='git')
        # Normalize time_limit to seconds ('m' = minutes, 'h' = hours).
        if self.time_unit == 'm':
            self.time_limit = self.time_limit * 60
        elif self.time_unit == 'h':
            self.time_limit = self.time_limit * 3600
        else:
            self.cancel(
                "running time unit is not proper, please pass as 'm' or 'h' ")
        if str(self.name.name).endswith('test_start'):
            # Build HTX only at the start phase of test
            self.setup_htx()
        if not os.path.exists("/usr/lpp/htx/mdt/%s" % self.mdt_file):
            self.cancel("MDT file %s not found due to config" % self.mdt_file)
    def setup_htx(self):
        """
        Builds HTX from git sources or installs it from an RPM, depending on
        the 'run_type' parameter, then starts the daemon and creates MDTs.
        """
        detected_distro = distro.detect()
        packages = ['git', 'gcc', 'make']
        # Distro-specific build dependencies for the HTX sources.
        if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
            packages.extend(['gcc-c++', 'ncurses-devel', 'tar'])
        elif detected_distro.name == "Ubuntu":
            packages.extend(['libncurses5', 'g++',
                             'ncurses-dev', 'libncurses-dev'])
        elif detected_distro.name == 'SuSE':
            packages.extend(['libncurses5', 'gcc-c++', 'ncurses-devel', 'tar'])
        else:
            self.cancel("Test not supported in %s" % detected_distro.name)
        smm = SoftwareManager()
        for pkg in packages:
            if not smm.check_installed(pkg) and not smm.install(pkg):
                self.cancel("Can not install %s" % pkg)
        if self.run_type == 'git':
            url = "https://github.com/open-power/HTX/archive/master.zip"
            tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
            archive.extract(tarball, self.teststmpdir)
            htx_path = os.path.join(self.teststmpdir, "HTX-master")
            os.chdir(htx_path)
            # Strip these exercisers out of the Makefile so they are not
            # built (presumably their extra build deps are not guaranteed
            # on every host -- TODO confirm).
            exercisers = ["hxecapi_afu_dir", "hxedapl", "hxecapi", "hxeocapi"]
            for exerciser in exercisers:
                process.run("sed -i 's/%s//g' %s/bin/Makefile" % (exerciser,
                                                                  htx_path))
            build.make(htx_path, extra_args='all')
            build.make(htx_path, extra_args='tar')
            # --touch avoids "file from the future" warnings on extraction.
            process.run('tar --touch -xvzf htx_package.tar.gz')
            os.chdir('htx_package')
            if process.system('./installer.sh -f'):
                self.fail("Installation of htx fails:please refer job.log")
        else:
            # RPM flow: reuse a matching installed rpm, otherwise remove any
            # mismatched install and install from the provided link.
            dist_name = detected_distro.name.lower()
            if dist_name == 'suse':
                dist_name = 'sles'
            rpm_check = "htx%s%s" % (dist_name, detected_distro.version)
            skip_install = False
            ins_htx = process.system_output(
                'rpm -qa | grep htx', shell=True, ignore_status=True).decode()
            if ins_htx:
                if not smm.check_installed(rpm_check):
                    self.log.info("Clearing existing HTX rpm")
                    process.system('rpm -e %s' %
                                   ins_htx, shell=True, ignore_status=True)
                    if os.path.exists('/usr/lpp/htx'):
                        shutil.rmtree('/usr/lpp/htx')
                else:
                    self.log.info("Using existing HTX")
                    skip_install = True
            if not skip_install:
                rpm_loc = self.params.get('rpm_link', default=None)
                if rpm_loc:
                    if process.system('rpm -ivh --nodeps %s '
                                      '--force' % rpm_loc,
                                      shell=True, ignore_status=True):
                        self.cancel("Installing rpm failed")
                else:
                    self.cancel("RPM link is required for RPM run type")
        self.log.info("Starting the HTX Deamon")
        process.run('/usr/lpp/htx/etc/scripts/htxd_run')
        self.log.info("Creating the HTX mdt files")
        process.run('htxcmdline -createmdt')
    def test_start(self):
        """
        Execute 'HTX' with appropriate parameters.
        """
        self.log.info("selecting the mdt file")
        cmd = "htxcmdline -select -mdt %s" % self.mdt_file
        process.system(cmd, ignore_status=True)
        self.log.info("Activating the %s", self.mdt_file)
        cmd = "htxcmdline -activate -mdt %s" % self.mdt_file
        process.system(cmd, ignore_status=True)
        self.log.info("Running the HTX ")
        cmd = "htxcmdline -run -mdt %s" % self.mdt_file
        process.system(cmd, ignore_status=True)
    def test_check(self):
        """
        Checks if HTX is running, and if no errors.
        """
        # Poll once per minute for the configured duration (seconds).
        for _ in range(0, self.time_limit, 60):
            self.log.info("HTX Error logs")
            process.system('htxcmdline -geterrlog', ignore_status=True)
            # A non-empty /tmp/htxerr means an exerciser reported a failure.
            if os.stat('/tmp/htxerr').st_size != 0:
                self.fail("check errorlogs for exact error and failure")
            cmd = 'htxcmdline -query -mdt %s' % self.mdt_file
            process.system(cmd, ignore_status=True)
            time.sleep(60)
    def test_stop(self):
        '''
        Shutdown the mdt file and the htx daemon and set SMT to original value
        '''
        self.stop_htx()
    def stop_htx(self):
        """
        Stop the HTX Run
        """
        self.log.info("shutting down the %s ", self.mdt_file)
        cmd = 'htxcmdline -shutdown -mdt %s' % self.mdt_file
        process.system(cmd, timeout=120, ignore_status=True)
        if self.run_type == 'rpm':
            # RPM installs also require stopping the daemon and unmounting
            # any persistent-memory scratch filesystems HTX created.
            process.system(
                '/usr/lpp/htx/etc/scripts/htxd_shutdown', ignore_status=True)
            process.system('umount /htx_pmem*', shell=True, ignore_status=True)
        else:
            daemon_state = process.system_output('/etc/init.d/htx.d status')
            if daemon_state.decode().split(" ")[-1] == 'running':
                process.system('/usr/lpp/htx/etc/scripts/htxd_shutdown')
| gpl-2.0 |
isyippee/nova | nova/api/openstack/compute/floating_ip_dns.py | 32 | 10231 | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_utils import netutils
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import floating_ip_dns
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import network
ALIAS = "os-floating-ip-dns"
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
    """Map a sequence of raw DNS entries onto the API list view."""
    views = []
    for raw_entry in dns_entries:
        views.append(_translate_dns_entry_view(raw_entry)['dns_entry'])
    return {'dns_entries': views}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
    """Map a sequence of raw DNS domains onto the API list view."""
    views = []
    for raw_domain in domain_entries:
        views.append(_translate_domain_entry_view(raw_domain)['domain_entry'])
    return {'domain_entries': views}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(wsgi.Controller):
    """DNS domain controller for OpenStack API."""
    def __init__(self):
        super(FloatingIPDNSDomainController, self).__init__()
        # skip_policy_check=True: policy is enforced at the API layer via
        # authorize() below, not inside the network API.
        self.network_api = network.API(skip_policy_check=True)
    @extensions.expected_errors(501)
    def index(self, req):
        """Return a list of available DNS domains."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            domains = self.network_api.get_dns_domains(context)
        except NotImplementedError:
            # Backend has no DNS support; surfaced as HTTP 501.
            common.raise_feature_not_supported()
        domainlist = [_create_domain_entry(domain['domain'],
                                           domain.get('scope'),
                                           domain.get('project'),
                                           domain.get('availability_zone'))
                      for domain in domains]
        return _translate_domain_entries_view(domainlist)
    @extensions.expected_errors((400, 501))
    @validation.schema(floating_ip_dns.domain_entry_update)
    def update(self, req, id, body):
        """Add or modify domain entry.

        'private' scope pairs with an availability zone; 'public' scope
        pairs with a project.  Supplying the wrong pairing is a 400.
        """
        context = req.environ['nova.context']
        authorize(context, action="domain:update")
        # 'id' arrives URL-quoted (dots escaped for Routes); decode it.
        fqdomain = _unquote_domain(id)
        entry = body['domain_entry']
        scope = entry['scope']
        project = entry.get('project', None)
        av_zone = entry.get('availability_zone', None)
        if scope == 'private' and project:
            msg = _("you can not pass project if the scope is private")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if scope == 'public' and av_zone:
            msg = _("you can not pass av_zone if the scope is public")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if scope == 'private':
            create_dns_domain = self.network_api.create_private_dns_domain
            area_name, area = 'availability_zone', av_zone
        else:
            create_dns_domain = self.network_api.create_public_dns_domain
            area_name, area = 'project', project
        try:
            create_dns_domain(context, fqdomain, area)
        except NotImplementedError:
            common.raise_feature_not_supported()
        return _translate_domain_entry_view({'domain': fqdomain,
                                             'scope': scope,
                                             area_name: area})
    @extensions.expected_errors((404, 501))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete the domain identified by id."""
        context = req.environ['nova.context']
        authorize(context, action="domain:delete")
        domain = _unquote_domain(id)
        # Delete the whole domain
        try:
            self.network_api.delete_dns_domain(context, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIPDNSEntryController(wsgi.Controller):
    """DNS Entry controller for OpenStack API."""
    def __init__(self):
        super(FloatingIPDNSEntryController, self).__init__()
        # skip_policy_check=True: policy is enforced here via authorize().
        self.network_api = network.API(skip_policy_check=True)
    @extensions.expected_errors((404, 501))
    def show(self, req, domain_id, id):
        """Return the DNS entry that corresponds to domain_id and id.

        'id' is either an IP address (lookup by address, may match several
        names) or a DNS name (lookup by name).
        """
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)
        floating_ip = None
        # Check whether id is a valid ipv4/ipv6 address.
        if netutils.is_valid_ip(id):
            floating_ip = id
        try:
            if floating_ip:
                entries = self.network_api.get_dns_entries_by_address(context,
                                                                      floating_ip,
                                                                      domain)
            else:
                entries = self.network_api.get_dns_entries_by_name(context,
                                                                   id,
                                                                   domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        if not entries:
            explanation = _("DNS entries not found.")
            raise webob.exc.HTTPNotFound(explanation=explanation)
        if floating_ip:
            # Address lookup: return every name mapped to this address.
            entrylist = [_create_dns_entry(floating_ip, entry, domain)
                         for entry in entries]
            dns_entries = _translate_dns_entries_view(entrylist)
            return wsgi.ResponseObject(dns_entries)
        # Name lookup: a single entry view.
        entry = _create_dns_entry(entries[0], id, domain)
        return _translate_dns_entry_view(entry)
    @extensions.expected_errors(501)
    @validation.schema(floating_ip_dns.dns_entry_update)
    def update(self, req, domain_id, id, body):
        """Add or modify dns entry."""
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)
        name = id
        entry = body['dns_entry']
        address = entry['ip']
        dns_type = entry['dns_type']
        try:
            # Upsert: add when the name is unknown, otherwise modify in place.
            entries = self.network_api.get_dns_entries_by_name(context,
                                                               name, domain)
            if not entries:
                # create!
                self.network_api.add_dns_entry(context, address, name,
                                               dns_type, domain)
            else:
                # modify!
                self.network_api.modify_dns_entry(context, name,
                                                  address, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        return _translate_dns_entry_view({'ip': address,
                                          'name': name,
                                          'type': dns_type,
                                          'domain': domain})
    @extensions.expected_errors((404, 501))
    @wsgi.response(202)
    def delete(self, req, domain_id, id):
        """Delete the entry identified by req and id."""
        context = req.environ['nova.context']
        authorize(context)
        domain = _unquote_domain(domain_id)
        name = id
        try:
            self.network_api.delete_dns_entry(context, name, domain)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIpDns(extensions.V21APIExtensionBase):
    """Floating IP DNS support."""
    name = "FloatingIpDns"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Return the domain and entry resources this extension exposes."""
        domain_resource = extensions.ResourceExtension(
            ALIAS, controller=FloatingIPDNSDomainController())
        entry_resource = extensions.ResourceExtension(
            'entries',
            controller=FloatingIPDNSEntryController(),
            parent={'member_name': 'domain',
                    'collection_name': 'os-floating-ip-dns'})
        return [domain_resource, entry_resource]
    def get_controller_extensions(self):
        """It's an abstract function V21APIExtensionBase and the extension
        will not be loaded without it.
        """
        return []
| apache-2.0 |
nikkitan/bitcoin | test/functional/feature_pruning.py | 4 | 20906 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock, ToHex
from test_framework.script import CScript, OP_RETURN, OP_NOP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
    """Mine n consecutive large blocks on node, each padded by a huge
    OP_RETURN coinbase scriptPubKey, submitting them via submitblock."""
    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
    # transaction but is consensus valid.

    # Set the nTime if this is the first time this function has been called.
    # A static variable ensures that time is monotonicly increasing and is therefore
    # different for each block created => blockhash is unique.
    # Bug fix: the guard previously tested for "nTimes" (trailing 's'), which
    # never matches the attribute set below, so nTime was reset to 0 on every
    # call instead of only the first.
    if "nTime" not in mine_large_blocks.__dict__:
        mine_large_blocks.nTime = 0

    # Get the block parameters for the first block
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    # Keep time strictly increasing and never behind the current tip's time.
    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
    previousblockhash = int(best_block["hash"], 16)

    for _ in range(n):
        # Build the coinbase transaction (with large scriptPubKey)
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()
        # Build the block
        block = CBlock()
        block.nVersion = best_block["version"]
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # Submit to the node
        node.submitblock(ToHex(block))
        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir):
    """Return total size, in MiB, of the regular files directly in blockdir.

    blockdir is expected to end with a path separator (callers pass it that
    way); subdirectories are skipped.
    """
    total_bytes = 0
    for name in os.listdir(blockdir):
        if os.path.isfile(os.path.join(blockdir, name)):
            total_bytes += os.path.getsize(blockdir + name)
    return total_bytes / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
    """Functional test for automatic (-prune=550) and manual (pruneblockchain
    RPC) pruning, including deep-reorg survival and wallet rescans."""
    def set_test_params(self):
        """Configure a 6-node clean-chain topology (see per-node comments)."""
        self.setup_clean_chain = True
        self.num_nodes = 6
        # Create nodes 0 and 1 to mine.
        # Create node 2 to test pruning.
        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        # Create nodes 5 to test wallet in prune mode, but do not connect
        self.extra_args = [
            self.full_node_default_args,
            self.full_node_default_args,
            ["-maxreceivebuffer=20000", "-prune=550"],
            ["-maxreceivebuffer=20000"],
            ["-maxreceivebuffer=20000"],
            ["-prune=550"],
        ]
        self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        """Wallet support is required (nodes import coinbase keys)."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Connect nodes 0-4 in a mesh; node 5 stays disconnected for now."""
        self.setup_nodes()
        # Trailing '' keeps the path separator, as calc_usage expects.
        self.prunedir = os.path.join(self.nodes[2].datadir, 'regtest', 'blocks', '')
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[0], 4)
        self.sync_blocks(self.nodes[0:5])
    def setup_nodes(self):
        """Start all nodes and import the deterministic coinbase key."""
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()
        for n in self.nodes:
            n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=False)
    def create_big_chain(self):
        """Mine an initial chain of 995 blocks totalling more than 550MiB."""
        # Start by creating some coinbases we can spend later
        self.nodes[1].generate(200)
        self.sync_blocks(self.nodes[0:2])
        self.nodes[0].generate(150)
        # Then mine enough full blocks to create more than 550MiB of data
        mine_large_blocks(self.nodes[0], 645)
        self.sync_blocks(self.nodes[0:5])
    def test_height_min(self):
        """Check pruning does not start until the height/size criteria hit."""
        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
        self.log.info("Success")
        self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        mine_large_blocks(self.nodes[0], 25)
        # Wait for blk00000.dat to be pruned
        wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        assert_greater_than(550, usage)
    def create_chain_with_staleblocks(self):
        """Repeatedly fork and reorg so stale blocks accumulate on disk."""
        # Create stale blocks in manageable sized chunks
        self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            disconnect_nodes(self.nodes[0], 1)
            disconnect_nodes(self.nodes[0], 2)
            # Mine 24 blocks in node 1
            mine_large_blocks(self.nodes[1], 24)
            # Reorg back with 25 block chain from node 0
            mine_large_blocks(self.nodes[0], 25)
            # Create connections in the order so both nodes can see the reorg at the same time
            connect_nodes(self.nodes[0], 1)
            connect_nodes(self.nodes[0], 2)
            self.sync_blocks(self.nodes[0:3])
        self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
    def reorg_test(self):
        """Force a >288-block reorg and verify the pruning node survives it."""
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)
        self.forkheight = height - 287
        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
        self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
        self.nodes[1].invalidateblock(self.forkhash)
        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())
        # Disconnect node1 and generate the new chain
        disconnect_nodes(self.nodes[0], 1)
        disconnect_nodes(self.nodes[1], 2)
        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)
        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
        self.log.info("Mine 220 more large blocks so we have requisite history")
        mine_large_blocks(self.nodes[0], 220)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        assert_greater_than(550, usage)
    def reorg_back(self):
        """Reorg node 2 back across pruned history, forcing a redownload."""
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
        with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
            self.nodes[2].verifychain(checklevel=4, nblocks=0)
        self.log.info("Will need to redownload block %d" % self.forkheight)
        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it is expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2
        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
            self.nodes[0].invalidateblock(curchainhash)
            assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
            assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
            goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1
        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Wait for Node 2 to reorg to proper height
        wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
    def manual_test(self, node_number, use_timestamp):
        """Exercise the pruneblockchain RPC on one node, by height or by
        timestamp depending on use_timestamp."""
        # at this point, node has 995 blocks and has not yet run in prune mode
        self.start_node(node_number)
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
        # now re-start in manual pruning mode
        self.stop_node(node_number)
        self.start_node(node_number, extra_args=["-prune=1"])
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        def height(index):
            # Translate a block height into the prune target: either the
            # height itself or the block's timestamp plus the rescan window.
            if use_timestamp:
                return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
            else:
                return index
        def prune(index):
            # Prune up to the given height and check the reported pruneheight.
            ret = node.pruneblockchain(height=height(index))
            assert_equal(ret, node.getblockchaininfo()['pruneheight'])
        def has_block(index):
            # True when block file blkNNNNN.dat still exists on disk.
            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
        # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
        assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
        # Save block transaction count before pruning, assert value
        block1_details = node.getblock(node.getblockhash(1))
        assert_equal(block1_details["nTx"], len(block1_details["tx"]))
        # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
        node.generate(6)
        assert_equal(node.getblockchaininfo()["blocks"], 1001)
        # Pruned block should still know the number of transactions
        assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
        # negative heights should raise an exception
        assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
        # height=100 too low to prune first block file so this is a no-op
        prune(100)
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # Does nothing
        node.pruneblockchain(height(0))
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # height=500 should prune first file
        prune(500)
        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
        assert has_block(1), "blk00001.dat is missing when should still be there"
        # height=650 should prune second file
        prune(650)
        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        prune(1000)
        assert has_block(2), "blk00002.dat is still there, should be pruned by now"
        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
        # stop node, start back up with auto-prune at 550 MiB, make sure still runs
        self.stop_node(node_number)
        self.start_node(node_number, extra_args=["-prune=550"])
        self.log.info("Success")
    def wallet_test(self):
        """Check wallet health on pruned nodes (restart rescan and post-IBD load)."""
        # check that the pruning node's wallet is still in good shape
        self.log.info("Stop and start pruning node to trigger wallet rescan")
        self.stop_node(2)
        self.start_node(2, extra_args=["-prune=550"])
        self.log.info("Success")
        # check that wallet loads successfully when restarting a pruned node after IBD.
        # this was reported to fail in #7494.
        self.log.info("Syncing node 5 to test wallet")
        connect_nodes(self.nodes[0], 5)
        nds = [self.nodes[0], self.nodes[5]]
        self.sync_blocks(nds, wait=5, timeout=300)
        self.stop_node(5)  # stop and start to trigger rescan
        self.start_node(5, extra_args=["-prune=550"])
        self.log.info("Success")
    def run_test(self):
        """Drive the full scenario; the ASCII diagrams below track chain state."""
        self.log.info("Warning! This test requires 4GB of disk space")
        self.log.info("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)
        # stop manual-pruning node with 995 blocks
        self.stop_node(3)
        self.stop_node(4)
        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)
        self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044)  &..                    $...$(1319)
        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
        self.log.info("Check that we can survive a 288 block reorg still")
        self.reorg_test()  # (1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                 \               \
        #                  \               X...
        #                   \               \
        #                    ++...+(1031)X..   ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               X...
        #                   \               \
        #                    ++...+(1031)X..   ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                 \               \
        #                  \               *...**(1320)
        #                   \               \
        #                    ++...++(1044)   ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)
        self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                 \               \
        #                  \               X@...@@@(1552)
        #                   \
        #                    +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid
        self.log.info("Test manual pruning with block indices")
        self.manual_test(3, use_timestamp=False)
        self.log.info("Test manual pruning with timestamps")
        self.manual_test(4, use_timestamp=True)
        self.log.info("Test wallet re-scan")
        self.wallet_test()
        self.log.info("Done")
# Entry point: run the pruning functional test via the framework's main().
if __name__ == '__main__':
    PruneTest().main()
| mit |
Pablo126/SSBW | Tarea4/tarea4/lib/python3.5/site-packages/wheel/pkginfo.py | 565 | 1225 | """Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
try:
unicode
_PY3 = False
except NameError:
_PY3 = True
if not _PY3:
from email.generator import Generator
def read_pkg_info_bytes(bytestr):
return Parser().parsestr(bytestr)
def read_pkg_info(path):
with open(path, "r") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, 'w') as metadata:
Generator(metadata, maxheaderlen=0).flatten(message)
else:
from email.generator import BytesGenerator
def read_pkg_info_bytes(bytestr):
headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
message = Parser().parsestr(headers)
return message
def read_pkg_info(path):
with open(path, "r",
encoding="ascii",
errors="surrogateescape") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, "wb") as out:
BytesGenerator(out, maxheaderlen=0).flatten(message)
| gpl-3.0 |
ShoRit/shipping-costs-sample | v2/lib/python2.7/site-packages/wheel/install.py | 472 | 18070 | """
Operations on existing wheel files, including basic installation.
"""
# XXX see patched pip to install
import sys
import warnings
import os.path
import re
import zipfile
import hashlib
import csv
import shutil
try:
_big_number = sys.maxsize
except NameError:
_big_number = sys.maxint
from wheel.decorator import reify
from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode,
native, binary, HashingFile)
from wheel import signatures
from wheel.pkginfo import read_pkg_info_bytes
from wheel.util import open_for_csv
from .pep425tags import get_supported
from .paths import get_install_paths
# The next major version after this version of the 'wheel' tool:
VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE).match
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available.

    On first call the ``global`` declaration makes the ``from ... import``
    rebind the module-level ``parse_version`` name to the imported
    implementation, so subsequent calls bypass this shim entirely.
    """
    global parse_version
    try:
        from pkg_resources import parse_version
    except ImportError:
        # Fall back to distutils when setuptools is unavailable.
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
class BadWheelFile(ValueError):
    """Raised when a wheel file is malformed or fails hash/signature checks."""
    pass
class WheelFile(object):
    """Parse wheel-specific attributes from a wheel (.whl) file and offer
    basic installation and verification support.

    WheelFile can be used to simply parse a wheel filename by avoiding the
    methods that require the actual file contents."""

    WHEEL_INFO = "WHEEL"
    RECORD = "RECORD"

    def __init__(self,
                 filename,
                 fp=None,
                 append=False,
                 context=get_supported):
        """
        :param fp: A seekable file-like object or None to open(filename).
        :param append: Open archive in append mode.
        :param context: Function returning list of supported tags. Wheels
            must have the same context to be sortable.
        :raises BadWheelFile: if the filename does not look like a wheel.
        """
        self.filename = filename
        self.fp = fp
        self.append = append
        self.context = context
        basename = os.path.basename(filename)
        self.parsed_filename = WHEEL_INFO_RE(basename)
        if not basename.endswith('.whl') or self.parsed_filename is None:
            raise BadWheelFile("Bad filename '%s'" % filename)

    def __repr__(self):
        return self.filename

    @property
    def distinfo_name(self):
        """Archive directory holding the wheel's metadata."""
        return "%s.dist-info" % self.parsed_filename.group('namever')

    @property
    def datadir_name(self):
        """Archive directory holding files destined for non-root locations."""
        return "%s.data" % self.parsed_filename.group('namever')

    @property
    def record_name(self):
        """Archive path of the RECORD manifest."""
        return "%s/%s" % (self.distinfo_name, self.RECORD)

    @property
    def wheelinfo_name(self):
        """Archive path of the WHEEL metadata file."""
        return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO)

    @property
    def tags(self):
        """A wheel file is compatible with the Cartesian product of the
        period-delimited tags in its filename.

        To choose a wheel file among several candidates having the same
        distribution version 'ver', an installer ranks each triple of
        (pyver, abi, plat) that its Python installation can run, sorting
        the wheels by the best-ranked tag it supports and then by their
        arity which is just len(list(compatibility_tags)).
        """
        tags = self.parsed_filename.groupdict()
        for pyver in tags['pyver'].split('.'):
            for abi in tags['abi'].split('.'):
                for plat in tags['plat'].split('.'):
                    yield (pyver, abi, plat)

    # Alias kept for callers that use the older name.
    compatibility_tags = tags

    @property
    def arity(self):
        """The number of compatibility tags the wheel declares."""
        return len(list(self.compatibility_tags))

    @property
    def rank(self):
        """
        Lowest index of any of this wheel's tags in self.context(), and the
        arity e.g. (0, 1)
        """
        return self.compatibility_rank(self.context())

    @property
    def compatible(self):
        return self.rank[0] != _big_number  # bad API!

    # deprecated:
    def compatibility_rank(self, supported):
        """Rank the wheel against the supported tags. Smaller ranks are more
        compatible!

        :param supported: A list of compatibility tags that the current
            Python implementation can run.
        """
        preferences = []
        for tag in self.compatibility_tags:
            try:
                preferences.append(supported.index(tag))
            # Tag not present
            except ValueError:
                pass
        if len(preferences):
            return (min(preferences), self.arity)
        return (_big_number, 0)

    # deprecated
    def supports_current_python(self, x):
        assert self.context == x, 'context mismatch'
        return self.compatible

    # Comparability.
    # Wheels are equal if they refer to the same file.
    # If two wheels are not equal, compare based on (in this order):
    #   1. Name
    #   2. Version
    #   3. Compatibility rank
    #   4. Filename (as a tiebreaker)
    @property
    def _sort_key(self):
        return (self.parsed_filename.group('name'),
                parse_version(self.parsed_filename.group('ver')),
                tuple(-x for x in self.rank),
                self.filename)

    def __eq__(self, other):
        return self.filename == other.filename

    def __ne__(self, other):
        return self.filename != other.filename

    def __lt__(self, other):
        if self.context != other.context:
            raise TypeError("{0}.context != {1}.context".format(self, other))
        return self._sort_key < other._sort_key
        # A large unreachable legacy comparison (marked "XXX prune") used to
        # follow the return above; it has been removed as dead code.

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return self == other or self < other

    def __ge__(self, other):
        return self == other or other < self

    #
    # Methods using the file's contents:
    #

    @reify
    def zipfile(self):
        """Open the underlying archive (once), verifying it unless appending."""
        mode = "r"
        if self.append:
            mode = "a"
        vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode)
        if not self.append:
            self.verify(vzf)
        return vzf

    @reify
    def parsed_wheel_info(self):
        """Parse wheel metadata (the .data/WHEEL file)"""
        return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name))

    def check_version(self):
        """Raise ValueError if the wheel's spec version is newer than we support."""
        version = self.parsed_wheel_info['Wheel-Version']
        if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH:
            raise ValueError("Wheel version is too high")

    @reify
    def install_paths(self):
        """
        Consult distutils to get the install paths for our dist. A dict with
        ('purelib', 'platlib', 'headers', 'scripts', 'data').

        We use the name from our filename as the dist name, which means headers
        could be installed in the wrong place if the filesystem-escaped name
        is different than the Name. Who cares?
        """
        name = self.parsed_filename.group('name')
        return get_install_paths(name)

    def install(self, force=False, overrides={}):
        """
        Install the wheel into site-packages.

        :param force: If False, refuse to overwrite any existing file.
        :param overrides: Optional mapping overriding the install directory
            for any of the standard location keys. (Only read here, so the
            mutable default is safe.)
        """
        # Utility to get the target directory for a particular key
        def get_path(key):
            return overrides.get(key) or self.install_paths[key]

        # The base target location is either purelib or platlib
        if self.parsed_wheel_info['Root-Is-Purelib'] == 'true':
            root = get_path('purelib')
        else:
            root = get_path('platlib')

        # Parse all the names in the archive
        name_trans = {}
        for info in self.zipfile.infolist():
            name = info.filename
            # Zip files can contain entries representing directories.
            # These end in a '/'.
            # We ignore these, as we create directories on demand.
            if name.endswith('/'):
                continue

            # Pathnames in a zipfile namelist are always /-separated.
            # In theory, paths could start with ./ or have other oddities
            # but this won't happen in practical cases of well-formed wheels.
            # We'll cover the simple case of an initial './' as it's both easy
            # to do and more common than most other oddities.
            if name.startswith('./'):
                name = name[2:]

            # Split off the base directory to identify files that are to be
            # installed in non-root locations
            basedir, sep, filename = name.partition('/')
            if sep and basedir == self.datadir_name:
                # Data file. Target destination is elsewhere
                key, sep, filename = filename.partition('/')
                if not sep:
                    raise ValueError("Invalid filename in wheel: {0}".format(name))
                target = get_path(key)
            else:
                # Normal file. Target destination is root
                key = ''
                target = root
                filename = name

            # Map the actual filename from the zipfile to its intended target
            # directory and the pathname relative to that directory.
            dest = os.path.normpath(os.path.join(target, filename))
            name_trans[info] = (key, target, filename, dest)

        # We're now ready to start processing the actual install. The process
        # is as follows:
        #   1. Prechecks - is the wheel valid, is its declared architecture
        #      OK, etc. [[Responsibility of the caller]]
        #   2. Overwrite check - do any of the files to be installed already
        #      exist?
        #   3. Actual install - put the files in their target locations.
        #   4. Update RECORD - write a suitably modified RECORD file to
        #      reflect the actual installed paths.

        if not force:
            for info, v in name_trans.items():
                k = info.filename
                key, target, filename, dest = v
                if os.path.exists(dest):
                    raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest))

        # Get the name of our executable, for use when replacing script
        # wrapper hashbang lines.
        # We encode it using getfilesystemencoding, as that is "the name of
        # the encoding used to convert Unicode filenames into system file
        # names".
        exename = sys.executable.encode(sys.getfilesystemencoding())
        record_data = []
        record_name = self.distinfo_name + '/RECORD'
        for info, (key, target, filename, dest) in name_trans.items():
            name = info.filename
            # Skip the RECORD file. Checked *before* opening the member so
            # we do not leak an open stream for the skipped entry.
            if name == record_name:
                continue
            source = self.zipfile.open(info)
            ddir = os.path.dirname(dest)
            if not os.path.isdir(ddir):
                os.makedirs(ddir)
            destination = HashingFile(open(dest, 'wb'))
            if key == 'scripts':
                # Rewrite '#!python' stub hashbangs to the real interpreter.
                hashbang = source.readline()
                if hashbang.startswith(b'#!python'):
                    hashbang = b'#!' + exename + binary(os.linesep)
                destination.write(hashbang)
            shutil.copyfileobj(source, destination)
            reldest = os.path.relpath(dest, root)
            # Bug fix: str.replace() returns a new string; the original code
            # discarded the result, so RECORD kept os.sep (backslashes on
            # Windows) instead of the '/' separators the wheel spec requires.
            reldest = reldest.replace(os.sep, '/')
            record_data.append((reldest, destination.digest(), destination.length))
            destination.close()
            source.close()
            # preserve attributes (especially +x bit for scripts)
            attrs = info.external_attr >> 16
            if attrs:  # tends to be 0 if Windows.
                os.chmod(dest, attrs)

        record_name = os.path.join(root, self.record_name)
        # Close the RECORD file explicitly instead of leaking the handle.
        record_file = open_for_csv(record_name, 'w+')
        try:
            writer = csv.writer(record_file)
            for reldest, digest, length in sorted(record_data):
                writer.writerow((reldest, digest, length))
            writer.writerow((self.record_name, '', ''))
        finally:
            record_file.close()

    def verify(self, zipfile=None):
        """Configure the VerifyingZipFile `zipfile` by verifying its signature
        and setting expected hashes for every hash in RECORD.
        Caller must complete the verification process by completely reading
        every file in the archive (e.g. with extractall)."""
        sig = None
        if zipfile is None:
            zipfile = self.zipfile
        zipfile.strict = True

        record_name = '/'.join((self.distinfo_name, 'RECORD'))
        sig_name = '/'.join((self.distinfo_name, 'RECORD.jws'))
        # tolerate s/mime signatures:
        smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s'))
        # RECORD and its signatures are exempt from hash checking ("don't
        # care"), since RECORD cannot contain its own hash.
        zipfile.set_expected_hash(record_name, None)
        zipfile.set_expected_hash(sig_name, None)
        zipfile.set_expected_hash(smime_sig_name, None)
        record = zipfile.read(record_name)

        record_digest = urlsafe_b64encode(hashlib.sha256(record).digest())
        try:
            sig = from_json(native(zipfile.read(sig_name)))
        except KeyError:  # no signature
            pass
        if sig:
            headers, payload = signatures.verify(sig)
            if payload['hash'] != "sha256=" + native(record_digest):
                msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}."
                raise BadWheelFile(msg.format(payload['hash'],
                                              native(record_digest)))

        reader = csv.reader((native(r) for r in record.splitlines()))
        for row in reader:
            filename = row[0]
            hash = row[1]
            if not hash:
                if filename not in (record_name, sig_name):
                    sys.stderr.write("%s has no hash!\n" % filename)
                continue
            algo, data = row[1].split('=', 1)
            assert algo == "sha256", "Unsupported hash algorithm"
            zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data)))
class VerifyingZipFile(zipfile.ZipFile):
    """ZipFile that can assert that each of its extracted contents matches
    an expected sha256 hash. Note that each file must be completely read in
    order for its hash to be checked."""

    def __init__(self, file, mode="r",
                 compression=zipfile.ZIP_STORED,
                 allowZip64=False):
        zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64)
        # When True, open() refuses entries that have no registered hash.
        self.strict = False
        # Maps entry name -> raw digest bytes, or None for "don't care".
        self._expected_hashes = {}
        self._hash_algorithm = hashlib.sha256

    def set_expected_hash(self, name, hash):
        """
        :param name: name of zip entry
        :param hash: bytes of hash (or None for "don't care")
        """
        self._expected_hashes[name] = hash

    def open(self, name_or_info, mode="r", pwd=None):
        """Return file-like object for 'name', hooked so that the sha256 of
        the bytes actually read is compared to the expected digest once the
        entry has been fully consumed.

        :raises BadWheelFile: on hash mismatch at end-of-file, or (in strict
            mode) when no expected hash was registered for the entry.
        """
        # A non-monkey-patched version would contain most of zipfile.py
        ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd)
        if isinstance(name_or_info, zipfile.ZipInfo):
            name = name_or_info.filename
        else:
            name = name_or_info
        # Idiom fix: compare against None with 'is not', not '!='.
        if (name in self._expected_hashes
                and self._expected_hashes[name] is not None):
            expected_hash = self._expected_hashes[name]
            try:
                _update_crc_orig = ef._update_crc
            except AttributeError:
                warnings.warn('Need ZipExtFile._update_crc to implement '
                              'file hash verification (in Python >= 2.7)')
                return ef
            running_hash = self._hash_algorithm()
            # Piggyback on the reader's internal CRC callback so the hash is
            # updated with exactly the bytes the CRC sees.
            if hasattr(ef, '_eof'):  # py33
                def _update_crc(data):
                    _update_crc_orig(data)
                    running_hash.update(data)
                    if ef._eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            else:
                def _update_crc(data, eof=None):
                    _update_crc_orig(data, eof=eof)
                    running_hash.update(data)
                    if eof and running_hash.digest() != expected_hash:
                        raise BadWheelFile("Bad hash for file %r" % ef.name)
            ef._update_crc = _update_crc
        elif self.strict and name not in self._expected_hashes:
            raise BadWheelFile("No expected hash for file %r" % ef.name)
        return ef

    def pop(self):
        """Truncate the last file off this zipfile.
        Assumes infolist() is in the same order as the files (true for
        ordinary zip files created by Python)"""
        if not self.fp:
            raise RuntimeError(
                "Attempt to pop from ZIP archive that was already closed")
        last = self.infolist().pop()
        del self.NameToInfo[last.filename]
        # Rewind to where the popped member's local header began and cut
        # the archive off there; close() will rewrite the central directory.
        self.fp.seek(last.header_offset, os.SEEK_SET)
        self.fp.truncate()
        self._didModify = True
| apache-2.0 |
trenton3983/Doing_Math_with_Python | chapter6/solutions/sierpinski.py | 2 | 1545 | '''
sierpinski.py
Draw Sierpinski Triangle
'''
import random
import matplotlib.pyplot as plt
def transformation_1(p):
    """Contract the point *p* by one half toward the origin."""
    px, py = p
    return 0.5 * px, 0.5 * py
def transformation_2(p):
    """Contract *p* by one half, then shift it by (0.5, 0.5)."""
    px, py = p
    return 0.5 * px + 0.5, 0.5 * py + 0.5
def transformation_3(p):
    """Contract *p* by one half, then shift it right by 1 on the x axis."""
    px, py = p
    return 0.5 * px + 1, 0.5 * py
def get_index(probability):
    """Return a random index weighted by the given probability list.

    Draws one uniform sample and walks the running cumulative sum until the
    sample is covered; the final index is returned as a fallback so floating
    point rounding can never push the result out of range.
    """
    r = random.random()
    cumulative = 0
    for index, p in enumerate(probability):
        cumulative += p
        if r <= cumulative:
            return index
    return len(probability) - 1
def transform(p):
    """Apply one of the three Sierpinski transformations, chosen uniformly
    at random, to the point *p* and return the resulting point."""
    # The three candidate maps, each equally likely.
    transformations = [transformation_1, transformation_2, transformation_3]
    probability = [1/3, 1/3, 1/3]
    chosen = transformations[get_index(probability)]
    return chosen(p)
def draw_sierpinski(n):
    """Generate *n* chaos-game iterations of the Sierpinski triangle.

    Starting from (0, 0), repeatedly applies a random transformation and
    records every visited point. Returns two parallel lists (xs, ys).
    """
    xs = [0]
    ys = [0]
    point = (0, 0)
    for _ in range(n):
        point = transform(point)
        xs.append(point[0])
        ys.append(point[1])
    return xs, ys
if __name__ == '__main__':
    # Bug fix: the two prompt fragments previously concatenated without a
    # separator, displaying "...pointsin the..."; a trailing space restores
    # the intended prompt.
    n = int(input('Enter the desired number of points '
                  'in the Sierpinski Triangle: '))
    x, y = draw_sierpinski(n)
    # Plot the points
    plt.plot(x, y, 'o')
    plt.title('Sierpinski with {0} points'.format(n))
    plt.show()
| mit |
haiyangd/Gelatin | src/Gelatin/generator/Builder.py | 2 | 3593 | # Copyright (C) 2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import re
import shutil
from tempfile import NamedTemporaryFile
from urlparse import urlparse
from cgi import parse_qs
# Regex building blocks for one segment of a Builder path expression:
#   value   - a double-quoted string, with backslash escapes allowed;
#   attrib  - a key=value attribute, where keys may contain $, word chars, -;
#   path_re - one path segment (no '/', '"' or '?') optionally followed by a
#             query-style ?attrib&attrib... suffix.
value = r'"(?:\\.|[^"])*"'
attrib = r'(?:[\$\w\-]+=%s)' % value
path_re = re.compile(r'^[^/"\?]+(?:\?%s?(?:&%s?)*)?' % (attrib, attrib))
class Builder(object):
    """
    Abstract base class for all generators.
    """
    def __init__(self):
        # Abstract: concrete generators must provide their own constructor.
        raise NotImplementedError('abstract method')

    def serialize(self):
        """Return the generated document as a string (abstract)."""
        raise NotImplementedError('abstract method')

    def serialize_to_file(self, filename):
        """Serialize the document and write it to *filename*.

        The content is written to a temporary file first and then moved over
        the target.
        """
        # NOTE(review): unlink followed by move is not atomic; a concurrent
        # reader could briefly observe the target missing — confirm this is
        # acceptable for callers.
        with NamedTemporaryFile(delete = False) as thefile:
            thefile.write(self.serialize())
        if os.path.exists(filename):
            os.unlink(filename)
        shutil.move(thefile.name, filename)

    def dump(self):
        """Print a human-readable representation of the document (abstract)."""
        raise NotImplementedError('abstract method')

    def _splitpath(self, path):
        """Split a builder path expression on '/' into its segments.

        Each segment may carry a ?attr="..."&attr="..." suffix (see path_re),
        which stays attached to its segment.
        """
        match = path_re.match(path)
        result = []
        while match is not None:
            result.append(match.group(0))
            # Advance past the matched segment and its '/' separator.
            path = path[len(match.group(0)) + 1:]
            match = path_re.match(path)
        return result

    def _splittag(self, tag):
        """Split one path segment into (name, attribs).

        The name is lowercased with spaces replaced by hyphens; attribs is a
        list of (key, value) pairs parsed from the query part, with
        surrounding double quotes stripped from each value.
        """
        url = urlparse(tag)
        attribs = []
        for key, value in parse_qs(url.query).iteritems():
            # parse_qs yields a list per key; only the first value is used.
            value = value[0]
            if value.startswith('"') and value.endswith('"'):
                value = value[1:-1]
            attribs.append((str(key.lower()), value))
        return url.path.replace(' ', '-').lower(), attribs

    def create(self, path, data = None):
        """
        Creates the given node, regardless of whether or not it already
        exists.
        Returns the new node.
        """
        raise NotImplementedError('abstract method')

    def add(self, path, data = None, replace = False):
        """
        Creates the given node if it does not exist.
        Returns the (new or existing) node.
        """
        raise NotImplementedError('abstract method')

    def add_attribute(self, path, name, value):
        """
        Creates the given attribute and sets it to the given value.
        Returns the (new or existing) node to which the attribute was added.
        """
        raise NotImplementedError('abstract method')

    def open(self, path):
        """
        Creates and enters the given node, regardless of whether it already
        exists.
        Returns the new node.
        """
        raise NotImplementedError('abstract method')

    def enter(self, path):
        """
        Enters the given node. Creates it if it does not exist.
        Returns the node.
        """
        raise NotImplementedError('abstract method')

    def leave(self):
        """
        Returns to the node that was selected before the last call to enter().
        The history is a stack, to the method may be called multiple times.
        """
        raise NotImplementedError('abstract method')
| gpl-2.0 |
MaizerGomes/youtube-dl | youtube_dl/extractor/nfl.py | 76 | 6197 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
remove_end,
)
class NFLIE(InfoExtractor):
    """Extractor for videos hosted on nfl.com and NFL team club sites."""
    IE_NAME = 'nfl.com'
    # Matches both 16-char hex content ids and UUID-style ids.
    _VALID_URL = r'''(?x)https?://
        (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
        (?:.+?/)*
        (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
    _TESTS = [
        {
            'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
            'md5': '394ef771ddcd1354f665b471d78ec4c6',
            'info_dict': {
                'id': '0ap3000000398478',
                'ext': 'mp4',
                'title': 'Week 3: Redskins vs. Eagles highlights',
                'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
                'upload_date': '20140921',
                'timestamp': 1411337580,
                'thumbnail': 're:^https?://.*\.jpg$',
            }
        },
        {
            'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
            'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
            'info_dict': {
                'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
                'ext': 'mp4',
                'title': 'LIVE: Post Game vs. Browns',
                'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
                'upload_date': '20131229',
                'timestamp': 1388354455,
                'thumbnail': 're:^https?://.*\.jpg$',
            }
        },
        {
            'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
            'info_dict': {
                'id': '0ap3000000467607',
                'ext': 'mp4',
                'title': 'Frustrations flare on the field',
                'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
                'timestamp': 1422850320,
                'upload_date': '20150202',
            },
        },
        {
            'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
            'only_matching': True,
        }
    ]

    @staticmethod
    def prepend_host(host, url):
        """Make a possibly relative URL absolute against *host*."""
        if not url.startswith('http'):
            if not url.startswith('/'):
                url = '/%s' % url
            url = 'http://{0:}{1:}'.format(host, url)
        return url

    @staticmethod
    def format_from_stream(stream, protocol, host, path_prefix='',
                           preference=0, note=None):
        """Build a youtube-dl format dict from one CDN stream entry."""
        url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
            protocol=protocol,
            host=host,
            prefix=path_prefix,
            path=stream.get('path'),
        )
        return {
            'url': url,
            # 'rate' appears to be in bits/s; divided by 1000 for kbit/s.
            'vbr': int_or_none(stream.get('rate', 0), 1000),
            'preference': preference,
            'format_note': note,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id, host = mobj.group('id'), mobj.group('host')

        webpage = self._download_webpage(url, video_id)

        # Locate the player config JSON, falling back to the default path.
        config_url = NFLIE.prepend_host(host, self._search_regex(
            r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL',
            default='static/content/static/config/video/config.json'))
        # For articles, the id in the url is not the video id
        video_id = self._search_regex(
            r'contentId\s*:\s*"([^"]+)"', webpage, 'video id', default=video_id)
        config = self._download_json(config_url, video_id,
                                     note='Downloading player config')
        url_template = NFLIE.prepend_host(
            host, '{contentURLTemplate:}'.format(**config))
        video_data = self._download_json(
            url_template.format(id=video_id), video_id)

        formats = []
        cdn_data = video_data.get('cdnData', {})
        streams = cdn_data.get('bitrateInfo', [])
        if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
            # Single external CDN: derive protocol/host from its URI.
            parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
            protocol, host = parts.scheme, parts.netloc
            for stream in streams:
                formats.append(
                    NFLIE.format_from_stream(stream, protocol, host))
        else:
            # Otherwise enumerate every configured CDN for each stream.
            cdns = config.get('cdns')
            if not cdns:
                raise ExtractorError('Failed to get CDN data', expected=True)
            for name, cdn in cdns.items():
                # LimeLight streams don't seem to work
                if cdn.get('name') == 'LIMELIGHT':
                    continue
                protocol = cdn.get('protocol')
                host = remove_end(cdn.get('host', ''), '/')
                if not (protocol and host):
                    continue
                prefix = cdn.get('pathprefix', '')
                if prefix and not prefix.endswith('/'):
                    prefix = '%s/' % prefix
                # Prefer progressive HTTP over RTMP.
                preference = 0
                if protocol == 'rtmp':
                    preference = -2
                elif 'prog' in name.lower():
                    preference = 1
                for stream in streams:
                    formats.append(
                        NFLIE.format_from_stream(stream, protocol, host,
                                                 prefix, preference, name))
        self._sort_formats(formats)

        # Pick the largest available thumbnail (xl down to xs).
        thumbnail = None
        for q in ('xl', 'l', 'm', 's', 'xs'):
            thumbnail = video_data.get('imagePaths', {}).get(q)
            if thumbnail:
                break

        return {
            'id': video_id,
            'title': video_data.get('headline'),
            'formats': formats,
            'description': video_data.get('caption'),
            'duration': video_data.get('duration'),
            'thumbnail': thumbnail,
            # 'posted' looks like epoch milliseconds; scaled to seconds.
            'timestamp': int_or_none(video_data.get('posted'), 1000),
        }
| unlicense |
bratsche/Neutron-Drive | google_appengine/lib/django_1_2/tests/regressiontests/syndication/tests.py | 39 | 14191 | import datetime
from django.contrib.syndication import feeds, views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
from xml.dom import minidom
try:
set
except NameError:
from sets import Set as set
class FeedTestCase(TestCase):
    """Base class providing DOM assertion helpers for the feed tests."""
    fixtures = ['feeddata.json']

    def assertChildNodes(self, elem, expected):
        """Assert that *elem*'s child node names are exactly *expected*."""
        found = set(child.nodeName for child in elem.childNodes)
        self.assertEqual(found, set(expected))

    def assertChildNodeContent(self, elem, expected):
        """Assert the text of the first child with each tag name in *expected*."""
        for tag, want in expected.items():
            got = elem.getElementsByTagName(tag)[0].firstChild.wholeText
            self.assertEqual(got, want)

    def assertCategories(self, elem, expected):
        """Assert the set of <category> child texts equals *expected*."""
        found = set(child.firstChild.wholeText
                    for child in elem.childNodes
                    if child.nodeName == 'category')
        self.assertEqual(found, set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django']);
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing']);
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEquals(title.firstChild.wholeText, u'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
        # Naive datetimes passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
Test that a ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
######################################
# Deprecated feeds
######################################
class DeprecatedSyndicationFeedTest(FeedTestCase):
    """
    Tests for the deprecated API (feed() view and the feed_dict etc).
    """
    def test_empty_feed_dict(self):
        """
        Test that an empty feed_dict raises a 404.
        """
        response = self.client.get('/syndication/depr-feeds-empty/aware-dates/')
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(response.status_code, 404)
    def test_nonexistent_slug(self):
        """
        Test that a non-existent slug raises a 404.
        """
        response = self.client.get('/syndication/depr-feeds/foobar/')
        self.assertEqual(response.status_code, 404)
    def test_rss_feed(self):
        """
        A simple test for Rss201rev2Feed feeds generated by the deprecated
        system.
        """
        response = self.client.get('/syndication/depr-feeds/rss/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')
        chan = feed.getElementsByTagName('channel')[0]
        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link'])
        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
    def test_complex_base_url(self):
        """
        Tests that the base url for a complex feed doesn't raise a 500
        exception.
        """
        response = self.client.get('/syndication/depr-feeds/complex/')
        self.assertEqual(response.status_code, 404)
| bsd-3-clause |
gurneyalex/OpenUpgrade | openerp/report/render/rml2html/rml2html.py | 438 | 15438 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2005, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import sys
import cStringIO
from lxml import etree
import copy
from openerp.report.render.rml2pdf import utils
class _flowable(object):
    """Renders the flowable (story) part of an RML document to HTML.
    Each supported RML tag is mapped to a handler returning an HTML fragment.
    """
    def __init__(self, template, doc, localcontext = None):
        # Dispatch table: RML tag name -> rendering method.
        self._tags = {
            'title': self._tag_title,
            'spacer': self._tag_spacer,
            'para': self._tag_para,
            'section':self._section,
            'nextFrame': self._tag_next_frame,
            'blockTable': self._tag_table,
            'pageBreak': self._tag_page_break,
            'setNextTemplate': self._tag_next_template,
        }
        self.template = template
        self.doc = doc
        self.localcontext = localcontext
        self._cache = {}
    def _tag_page_break(self, node):
        # A page break is approximated by vertical whitespace in HTML.
        return '<br/>'*3
    def _tag_next_template(self, node):
        # NOTE(review): emits nothing and does not switch page templates --
        # <setNextTemplate> appears to be deliberately ignored in HTML output;
        # confirm against the PDF renderer's behaviour.
        return ''
    def _tag_next_frame(self, node):
        # Close the current frame and immediately open the next one.
        result=self.template.frame_stop()
        result+='<br/>'
        result+=self.template.frame_start()
        return result
    def _tag_title(self, node):
        # Titles become <h1> headings; the node is serialised in place.
        node.tag='h1'
        return etree.tostring(node)
    def _tag_spacer(self, node):
        # Roughly 35 RML units of height per <br/> (Python 2 integer division).
        length = 1+int(utils.unit_get(node.get('length')))/35
        return "<br/>"*length
    def _tag_table(self, node):
        """Convert an RML blockTable into an HTML <table>."""
        new_node = copy.deepcopy(node)
        for child in new_node:
            new_node.remove(child)
        new_node.tag = 'table'
        def process(node,new_node):
            # Recursively copy children; leaf cells become <p>, and cells
            # styled 'terp_tblheader' promote their row container to <th>.
            for child in utils._child_get(node,self):
                new_child = copy.deepcopy(child)
                new_node.append(new_child)
                if len(child):
                    for n in new_child:
                        new_child.remove(n)
                    process(child, new_child)
                else:
                    new_child.text = utils._process_text(self, child.text)
                    new_child.tag = 'p'
                    try:
                        if new_child.get('style').find('terp_tblheader')!= -1:
                            new_node.tag = 'th'
                    except Exception:
                        # Cells without a style attribute: leave tag unchanged.
                        pass
        process(node,new_node)
        if new_node.get('colWidths',False):
            # Emit an extra row of empty fixed-width cells so the HTML table
            # honours the RML column widths.
            sizes = map(lambda x: utils.unit_get(x), new_node.get('colWidths').split(','))
            tr = etree.SubElement(new_node, 'tr')
            for s in sizes:
                etree.SubElement(tr, 'td', width=str(s))
        return etree.tostring(new_node)
    def _tag_para(self, node):
        # Paragraphs become <p>; the RML style name is kept as the CSS class.
        new_node = copy.deepcopy(node)
        new_node.tag = 'p'
        if new_node.attrib.get('style',False):
            new_node.set('class', new_node.get('style'))
        new_node.text = utils._process_text(self, node.text)
        return etree.tostring(new_node)
    def _section(self, node):
        # Render all recognised children of a <section> in document order.
        result = ''
        for child in utils._child_get(node, self):
            if child.tag in self._tags:
                result += self._tags[child.tag](child)
        return result
    def render(self, node):
        """Render a story node: open the template's first frame, render every
        recognised child tag, then flush the remaining frames."""
        result = self.template.start()
        result += self.template.frame_start()
        for n in utils._child_get(node, self):
            if n.tag in self._tags:
                result += self._tags[n.tag](n)
            else:
                # Unrecognised tags are silently skipped.
                pass
        result += self.template.frame_stop()
        result += self.template.end()
        return result.encode('utf-8').replace('"',"\'").replace('°','&deg;')
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
    """A frame: rendered as a table whose first cell provides the x offset."""
    def __init__(self, posx, width):
        self.width = width
        self.posx = posx
    def tag_start(self):
        # Left-pad the frame content with an empty cell posx units wide.
        total_width = self.width + self.posx
        return ("<table border='0' width='%d'>"
                "<tr><td width='%d'> </td><td>") % (total_width, self.posx)
    def tag_end(self):
        # A frame stays open until tag_stop() is emitted.
        return True
    def tag_stop(self):
        return '</td></tr></table><br/>'
    def tag_mergeable(self):
        # Frames are never merged with other tags on the same line.
        return False
    def merge(self, frame):
        # Merging is a no-op for frames.
        pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
    """Renders drawString/drawRightString/drawCentredString nodes as a
    one-row table whose cells position each string fragment."""
    def __init__(self, node, style, localcontext=None):
        # Fix: the default used to be a shared mutable {}; give each
        # instance its own dict unless a context is explicitly supplied.
        self.localcontext = {} if localcontext is None else localcontext
        self.posx = utils.unit_get(node.get('x'))
        self.posy = utils.unit_get(node.get('y'))
        aligns = {
            'drawString': 'left',
            'drawRightString': 'right',
            'drawCentredString': 'center'
        }
        align = aligns[node.tag]
        # Each entry: (x, y, alignment, text, td CSS, td font size).
        self.pos = [(self.posx, self.posy, align, utils._process_text(self, node.text), style.get('td'), style.font_size_get('td'))]
    def tag_start(self):
        """Emit all collected strings as positioned cells of one table row."""
        self.pos.sort()
        res = "<table border='0' cellpadding='0' cellspacing='0'><tr>"
        posx = 0
        i = 0
        for (x,y,align,txt, style, fs) in self.pos:
            if align=="left":
                # Width estimate: character count times font size.
                pos2 = len(txt)*fs
                res+="<td width=\'%d\'></td><td style=\'%s\' width=\'%d\'>%s</td>" % (x - posx, style, pos2, txt)
                posx = x+pos2
            if align=="right":
                res+="<td width=\'%d\' align=\'right\' style=\'%s\'>%s</td>" % (x - posx, style, txt)
                posx = x
            if align=="center":
                res+="<td width=\'%d\' align=\'center\' style=\'%s\'>%s</td>" % ((x - posx)*2, style, txt)
                posx = 2*x-posx
            i+=1
        res+='</tr></table>'
        return res
    def merge(self, ds):
        """Absorb another draw-string tag rendered on the same line."""
        self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
    """Renders a <lines> node as an <hr> when the line is horizontal."""
    def __init__(self, node, style, localcontext=None):
        # Fix: fresh dict per instance instead of a shared mutable {} default.
        self.localcontext = {} if localcontext is None else localcontext
        coord = [utils.unit_get(x) for x in utils._process_text(self, node.text).split(' ')]
        self.ok = False
        self.posx = coord[0]
        self.posy = coord[1]
        self.width = coord[2]-coord[0]
        # Only horizontal lines (equal y endpoints) can be drawn as <hr>.
        self.ok = coord[1]==coord[3]
        # Fix: removed the dead `self.style = style` assignment that was
        # immediately overwritten; only the 'hr' CSS string is ever used.
        self.style = style.get('hr')
    def tag_start(self):
        if self.ok:
            return "<table border=\'0\' cellpadding=\'0\' cellspacing=\'0\' width=\'%d\'><tr><td width=\'%d\'></td><td><hr width=\'100%%\' style=\'margin:0px; %s\'></td></tr></table>" % (self.posx+self.width,self.posx,self.style)
        else:
            return ''
class _rml_stylesheet(object):
    """Converts RML paraStyle definitions into CSS text: one p.<name> rule
    per style, covering only the attributes this renderer understands."""
    def __init__(self, localcontext, stylesheet, doc):
        self.doc = doc
        self.localcontext = localcontext
        self.attrs = {}
        # Map RML attribute -> (css-property, css-value) pair.
        # fontSize is bumped by 5px to compensate for HTML rendering.
        self._tags = {
            'fontSize': lambda x: ('font-size',str(utils.unit_get(x)+5.0)+'px'),
            'alignment': lambda x: ('text-align',str(x))
        }
        result = ''
        for ps in stylesheet.findall('paraStyle'):
            attr = {}
            attrs = ps.attrib
            for key, val in attrs.items():
                attr[key] = val
            # Re-use the name `attrs` as the list of rendered declarations.
            attrs = []
            for a in attr:
                if a in self._tags:
                    attrs.append('%s:%s' % self._tags[a](attr[a]))
            if len(attrs):
                result += 'p.'+attr['name']+' {'+'; '.join(attrs)+'}\n'
        self.result = result
    def render(self):
        """Return the accumulated CSS text."""
        return self.result
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.tag in self._styles:
result = self._styles[node.tag](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
    """Parses the RML <template> element: page templates, their frames and
    page graphics.  frame_start/frame_stop wrap flowable output in the
    positioned HTML tables produced by the frame/graphics tags."""
    def __init__(self, template, localcontext=None):
        self.frame_pos = -1
        self.localcontext = localcontext
        self.frames = []
        self.template_order = []
        self.page_template = {}
        self.loop = 0
        # pageGraphics node handlers.
        self._tags = {
            'drawString': _rml_tmpl_draw_string,
            'drawRightString': _rml_tmpl_draw_string,
            'drawCentredString': _rml_tmpl_draw_string,
            'lines': _rml_tmpl_draw_lines
        }
        self.style = _rml_draw_style()
        rc = 'data:image/png;base64,'
        self.data = ''
        for pt in template.findall('pageTemplate'):
            frames = {}
            id = pt.get('id')
            self.template_order.append(id)
            for tmpl in pt.findall('frame'):
                posy = int(utils.unit_get(tmpl.get('y1')))
                posx = int(utils.unit_get(tmpl.get('x1')))
                frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
            for tmpl in pt.findall('pageGraphics'):
                for n in tmpl:
                    if n.tag == 'image':
                        # Inline the page image as a base64 data URI.
                        self.data = rc + utils._process_text(self, n.text)
                    if n.tag in self._tags:
                        t = self._tags[n.tag](n, self.style,self.localcontext)
                        frames[(t.posy,t.posx,n.tag)] = t
                    else:
                        # Anything else is a drawing-state change.
                        self.style.update(n)
            # Walk frames top-to-bottom (Python 2 list-returning keys()),
            # merging mergeable tags that share the same y coordinate.
            keys = frames.keys()
            keys.sort()
            keys.reverse()
            self.page_template[id] = []
            for key in range(len(keys)):
                if key>0 and keys[key-1][0] == keys[key][0]:
                    if type(self.page_template[id][-1]) == type(frames[keys[key]]):
                        if self.page_template[id][-1].tag_mergeable():
                            self.page_template[id][-1].merge(frames[keys[key]])
                            continue
                self.page_template[id].append(frames[keys[key]])
        self.template = self.template_order[0]
    def _get_style(self):
        return self.style
    def set_next_template(self):
        """Advance to the next page template, wrapping around at the end.

        Bug fix: the previous implementation referenced an undefined name
        and took the modulo of a list instead of its length, so any call
        raised NameError/TypeError.
        """
        current = self.template_order.index(self.template)
        self.template = self.template_order[(current + 1) % len(self.template_order)]
        self.frame_pos = -1
    def set_template(self, name):
        """Switch to the named page template and reset the frame cursor."""
        self.template = name
        self.frame_pos = -1
    def frame_start(self):
        """Open the next frame of the current template and return its HTML.

        Zero-length tags emit start+stop immediately; the loop stops at the
        first tag that stays open (tag_end() True) or when the template's
        frame list wraps around (self.loop is then set).
        """
        result = ''
        frames = self.page_template[self.template]
        ok = True
        while ok:
            self.frame_pos += 1
            if self.frame_pos>=len(frames):
                self.frame_pos=0
                self.loop=1
                ok = False
                continue
            f = frames[self.frame_pos]
            result+=f.tag_start()
            ok = not f.tag_end()
            if ok:
                result+=f.tag_stop()
        return result
    def frame_stop(self):
        """Close the currently open frame and return its HTML."""
        frames = self.page_template[self.template]
        f = frames[self.frame_pos]
        result=f.tag_stop()
        return result
    def start(self):
        return ''
    def end(self):
        """Flush the remaining frames until the template wraps around."""
        result = ''
        while not self.loop:
            result += self.frame_start()
            result += self.frame_stop()
        return result
class _rml_doc(object):
    """Top-level RML-to-HTML renderer: parses the RML document and writes a
    complete HTML page with JavaScript prev/next navigation over stories."""
    def __init__(self, data, localcontext):
        self.dom = etree.XML(data)
        self.localcontext = localcontext
        self.filename = self.dom.get('filename')
        self.result = ''
    def render(self, out):
        """Render the whole document and write the HTML to *out*."""
        self.result += '''<!DOCTYPE HTML PUBLIC "-//w3c//DTD HTML 4.0 Frameset//EN">
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <style type="text/css">
        p {margin:0px; font-size:12px;}
        td {font-size:14px;}
    '''
        # Append CSS derived from the RML stylesheet, then close the block.
        style = self.dom.findall('stylesheet')[0]
        s = _rml_stylesheet(self.localcontext, style, self.dom)
        self.result += s.render()
        self.result+='''
            </style>
        '''
        # Render each story against a fresh template instance.
        list_story =[]
        for story in utils._child_get(self.dom, self, 'story'):
            template = _rml_template(self.dom.findall('template')[0], self.localcontext)
            f = _flowable(template, self.dom, localcontext = self.localcontext)
            story_text = f.render(story)
            list_story.append(story_text)
            del f
        # NOTE(review): `template` is only bound inside the loop above, so a
        # document with no stories would raise NameError here -- confirm
        # whether story-less documents can occur.
        if template.data:
            tag = '''<img src = '%s' width=80 height=72/>'''% template.data
        else:
            tag = ''
        # %s placeholders: JS array of stories, page image tag, first story.
        self.result +='''
            <script type="text/javascript">
            var indexer = 0;
            var aryTest = %s ;
            function nextData()
            {
            if(indexer < aryTest.length -1)
            {
            indexer += 1;
            document.getElementById("tiny_data").innerHTML=aryTest[indexer];
            }
            }
            function prevData()
            {
            if (indexer > 0)
            {
            indexer -= 1;
            document.getElementById("tiny_data").innerHTML=aryTest[indexer];
            }
            }
        </script>
        </head>
        <body>
            %s
            <div id="tiny_data">
            %s
            </div>
            <br>
            <input type="button" value="next" onclick="nextData();">
            <input type="button" value="prev" onclick="prevData();">
        </body></html>'''%(list_story,tag,list_story[0])
        out.write( self.result)
def parseString(data, localcontext=None, fout=None):
    """Render RML *data* to HTML.

    If *fout* is given, write the HTML to that path and return the path;
    otherwise return the HTML as a string.
    """
    # Fix: a mutable {} default would be shared between calls.
    if localcontext is None:
        localcontext = {}
    r = _rml_doc(data, localcontext)
    if fout:
        # Fix: open() instead of the deprecated file() builtin, and make
        # sure the handle is closed even if rendering raises.
        fp = open(fout, 'wb')
        try:
            r.render(fp)
        finally:
            fp.close()
        return fout
    else:
        fp = cStringIO.StringIO()
        r.render(fp)
        return fp.getvalue()
def rml2html_help():
    # Print command-line usage and exit successfully (Python 2 print
    # statements: this module predates Python 3).
    print 'Usage: rml2html input.rml >output.html'
    print 'Render the standard input (RML) and output an HTML file'
    sys.exit(0)
if __name__=="__main__":
    # Command-line entry point: render the RML file named in argv[1] to
    # stdout, or show usage when no argument (or --help) is given.
    if len(sys.argv)>1:
        if sys.argv[1]=='--help':
            rml2html_help()
        print parseString(file(sys.argv[1], 'r').read()),
    else:
        print 'Usage: rml2html input.rml >output.html'
        print 'Try \'rml2html --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
OaklandPeters/sublp | sublp/test_invoke.py | 1 | 1863 | """
Opener tests - opens SublimeText
Do not run with standard unittests.
"""
import os
import unittest
from . import dispatch_cases
from . import dispatcher
from . import support
# Set correct directory for testing: the cases below use paths relative to
# this module, so chdir into its directory if we are not already there.
file_dir = support.normalize_path(os.path.split(__file__)[0])
if not os.getcwd() == file_dir:
    os.chdir(file_dir)
class InvokeTests(unittest.TestCase):
    """Dispatch-and-invoke tests.  Each test checks that its dispatch case
    matches the input string and then actually invokes SublimeText (see the
    module docstring: do not run these with standard unit tests)."""
    def setUp(self):
        pass
    def test_OpenFromProjectFilePath(self): #pylint:disable=C0103
        """Open based on path to project file"""
        _string = os.path.join("test_bypath", "bypath")
        case = dispatch_cases.OpenProjectFromFilePath()
        self.assertTrue(case.matches(_string))
        dispatcher.Sublp.invoke(case, _string)
    def test_OpenFromProjectName(self): #pylint:disable=C0103
        """Open based on name of project - contained in standard
        projects directory."""
        _string = "byname"
        projects_directory = "test_standard_projects_directory/"
        case = dispatch_cases.OpenProjectFromName(
            projects_directory=projects_directory
        )
        self.assertTrue(case.matches(_string))
        dispatcher.Sublp.invoke(case, _string)
    def test_OpenFromDirectory(self): #pylint:disable=C0103
        """Open based on name of directory containing projects file."""
        _string = "test_project_directory"
        case = dispatch_cases.OpenProjectFromDirectory()
        self.assertTrue(case.matches(_string))
        dispatcher.Sublp.invoke(case, _string)
    def test_OpenProjectFallback(self): #pylint:disable=C0103
        """Run fallback -- no projects file."""
        _string = "no_project_file"
        case = dispatch_cases.OpenProjectFallback()
        self.assertTrue(case.matches(_string))
        dispatcher.Sublp.invoke(case, _string)
if __name__ == "__main__":
unittest.main()
| mit |
shsingh/ansible | test/units/modules/network/fortios/test_fortios_system_pppoe_interface.py | 21 | 13357 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_pppoe_interface
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Autouse fixture: patch the module's Connection class so no real
    FortiOS device connection is ever attempted."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_pppoe_interface.Connection')
    return connection_class_mock
# Module-level handler shared by every test below.  NOTE(review): this passes
# the fixture *function* itself, not a mocked connection instance; it works
# only because the handler methods are patched in each test -- confirm.
fos_instance = FortiOSHandler(connection_mock)
def test_system_pppoe_interface_creation(mocker):
    """state=present must POST the payload (underscores translated to
    dashes) via FortiOSHandler.set and report a successful change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_pppoe_interface': {
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    # Module argument names use underscores; the FortiOS API expects dashes.
    expected_data = {
        'ac-name': 'test_value_3',
        'auth-type': 'auto',
        'device': 'test_value_5',
        'dial-on-demand': 'enable',
        'disc-retry-timeout': '7',
        'idle-timeout': '8',
        'ipunnumbered': 'test_value_9',
        'ipv6': 'enable',
        'lcp-echo-interval': '11',
        'lcp-max-echo-fails': '12',
        'name': 'default_name_13',
        'padt-retry-timeout': '14',
        'password': 'test_value_15',
        'pppoe-unnumbered-negotiate': 'enable',
        'service-name': 'test_value_17',
        'username': 'test_value_18'
    }
    set_method_mock.assert_called_with('system', 'pppoe-interface', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_pppoe_interface_creation_fails(mocker):
    """A failing set (HTTP 500) must be reported as an error with no
    change recorded."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_pppoe_interface': {
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    # Module argument names use underscores; the FortiOS API expects dashes.
    expected_data = {
        'ac-name': 'test_value_3',
        'auth-type': 'auto',
        'device': 'test_value_5',
        'dial-on-demand': 'enable',
        'disc-retry-timeout': '7',
        'idle-timeout': '8',
        'ipunnumbered': 'test_value_9',
        'ipv6': 'enable',
        'lcp-echo-interval': '11',
        'lcp-max-echo-fails': '12',
        'name': 'default_name_13',
        'padt-retry-timeout': '14',
        'password': 'test_value_15',
        'pppoe-unnumbered-negotiate': 'enable',
        'service-name': 'test_value_17',
        'username': 'test_value_18'
    }
    set_method_mock.assert_called_with('system', 'pppoe-interface', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_pppoe_interface_removal(mocker):
    """state=absent must call FortiOSHandler.delete (with some mkey) and
    report a successful change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_pppoe_interface': {
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    # mkey is derived internally from the payload, so only assert it exists.
    delete_method_mock.assert_called_with('system', 'pppoe-interface', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_pppoe_interface_deletion_fails(mocker):
    """A failing delete (HTTP 500) must be reported as an error with no
    change recorded."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_pppoe_interface': {
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    delete_method_mock.assert_called_with('system', 'pppoe-interface', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_pppoe_interface_idempotent(mocker):
    """A 404 on DELETE-style response means nothing needed doing: the
    module must report neither an error nor a change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_pppoe_interface': {
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    # Module argument names use underscores; the FortiOS API expects dashes.
    expected_data = {
        'ac-name': 'test_value_3',
        'auth-type': 'auto',
        'device': 'test_value_5',
        'dial-on-demand': 'enable',
        'disc-retry-timeout': '7',
        'idle-timeout': '8',
        'ipunnumbered': 'test_value_9',
        'ipv6': 'enable',
        'lcp-echo-interval': '11',
        'lcp-max-echo-fails': '12',
        'name': 'default_name_13',
        'padt-retry-timeout': '14',
        'password': 'test_value_15',
        'pppoe-unnumbered-negotiate': 'enable',
        'service-name': 'test_value_17',
        'username': 'test_value_18'
    }
    set_method_mock.assert_called_with('system', 'pppoe-interface', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_pppoe_interface_filter_foreign_attributes(mocker):
    """Attributes not defined in the module schema (here
    random_attribute_not_valid) must be stripped from the API payload."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_pppoe_interface': {
            'random_attribute_not_valid': 'tag',
            'ac_name': 'test_value_3',
            'auth_type': 'auto',
            'device': 'test_value_5',
            'dial_on_demand': 'enable',
            'disc_retry_timeout': '7',
            'idle_timeout': '8',
            'ipunnumbered': 'test_value_9',
            'ipv6': 'enable',
            'lcp_echo_interval': '11',
            'lcp_max_echo_fails': '12',
            'name': 'default_name_13',
            'padt_retry_timeout': '14',
            'password': 'test_value_15',
            'pppoe_unnumbered_negotiate': 'enable',
            'service_name': 'test_value_17',
            'username': 'test_value_18'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_system_pppoe_interface.fortios_system(input_data, fos_instance)
    # Note: the foreign attribute must NOT appear in the expected payload.
    expected_data = {
        'ac-name': 'test_value_3',
        'auth-type': 'auto',
        'device': 'test_value_5',
        'dial-on-demand': 'enable',
        'disc-retry-timeout': '7',
        'idle-timeout': '8',
        'ipunnumbered': 'test_value_9',
        'ipv6': 'enable',
        'lcp-echo-interval': '11',
        'lcp-max-echo-fails': '12',
        'name': 'default_name_13',
        'padt-retry-timeout': '14',
        'password': 'test_value_15',
        'pppoe-unnumbered-negotiate': 'enable',
        'service-name': 'test_value_17',
        'username': 'test_value_18'
    }
    set_method_mock.assert_called_with('system', 'pppoe-interface', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
osrg/ryu | ryu/lib/packet/gre.py | 7 | 7978 | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ryu.lib.pack_utils import msg_pack_into
from . import packet_base
from . import packet_utils
from . import ether_types
# GRE header flag bits (RFC 2784 / RFC 2890): each marks the presence of the
# corresponding optional field (Checksum, Key, Sequence Number).
GRE_CHECKSUM_FLG = 1 << 7
GRE_KEY_FLG = 1 << 5
GRE_SEQUENCE_NUM_FLG = 1 << 4
class gre(packet_base.PacketBase):
"""GRE (RFC2784,RFC2890) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
============== ========================================================
Attribute Description
============== ========================================================
version Version.
protocol Protocol Type field.
The Protocol Type is defined as "ETHER TYPES".
checksum Checksum field(optional).
When you set a value other than None,
this field will be automatically calculated.
key Key field(optional)
This field is intended to be used for identifying
an individual traffic flow within a tunnel.
vsid Virtual Subnet ID field(optional)
This field is a 24-bit value that is used
to identify the NVGRE-based Virtual Layer 2 Network.
flow_id FlowID field(optional)
This field is an 8-bit value that is used to provide
per-flow entropy for flows in the same VSID.
seq_number Sequence Number field(optional)
============== ========================================================
"""
_PACK_STR = "!BBH"
_CHECKSUM_PACK_STR = "!H2x"
_KEY_PACK_STR = "!I"
_SEQNUM_PACK_STR = "!I"
_MIN_LEN = struct.calcsize(_PACK_STR)
_CHECKSUM_LEN = struct.calcsize(_CHECKSUM_PACK_STR)
_KEY_LEN = struct.calcsize(_KEY_PACK_STR)
_SEQNUM_PACK_LEN = struct.calcsize(_SEQNUM_PACK_STR)
# GRE header
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |C| |K|S| Reserved0 | Ver | Protocol Type |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Checksum (optional) | Reserved1 (Optional) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key (optional) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Sequence Number (Optional) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    def __init__(self, version=0, protocol=ether_types.ETH_TYPE_IP,
                 checksum=None, key=None, vsid=None, flow_id=None,
                 seq_number=None):
        super(gre, self).__init__()
        self.version = version
        self.protocol = protocol
        self.checksum = checksum
        self.seq_number = seq_number
        # The 32-bit Key field carries the NVGRE VSID in its upper 24 bits
        # and the FlowID in its lowest 8 bits; accept either the whole key
        # or the (vsid, flow_id) pair.  An explicit key wins.
        if key is not None:
            self._key = key
            self._vsid = self._key >> 8
            self._flow_id = self._key & 0xff
        elif (vsid is not None) and (flow_id is not None):
            self._key = vsid << 8 | flow_id
            self._vsid = vsid
            self._flow_id = flow_id
        else:
            # No key information at all: the optional Key field is omitted.
            self._key = None
            self._vsid = None
            self._flow_id = None
    @property
    def key(self):
        # Combined 32-bit Key field (VSID << 8 | FlowID), or None when absent.
        return self._key
    @key.setter
    def key(self, key):
        if key is not None:
            self._key = key
            # Keep the derived vsid/flow_id views in sync with the new key.
            self._vsid = self._key >> 8
            self._flow_id = self._key & 0xff
        else:
            self._key = None
            self._vsid = None
            self._flow_id = None
@property
def vsid(self):
    """24-bit Virtual Subnet ID portion of the key."""
    return self._vsid

@vsid.setter
def vsid(self, vsid):
    # Replace the upper 24 bits of the key while keeping the FlowID byte.
    # NOTE(review): assumes self._key is already set; raises TypeError when
    # the key is None -- confirm callers always set a key first.
    flow_byte = self._key & 0xff
    self._key = (vsid << 8) | flow_byte
    self._vsid = vsid
@property
def flow_id(self):
    """8-bit FlowID portion of the key."""
    return self._flow_id

@flow_id.setter
def flow_id(self, flow_id):
    # Replace the low byte of the key while keeping the 24-bit VSID bits.
    # NOTE(review): assumes self._key is already set; raises TypeError when
    # the key is None -- confirm callers always set a key first.
    vsid_bits = self._key & 0xffffff00
    self._key = vsid_bits | flow_id
    self._flow_id = flow_id
@classmethod
def parser(cls, buf):
    # Decode the fixed header: present-flags byte, version byte, protocol.
    present, version, protocol = struct.unpack_from(cls._PACK_STR, buf)
    gre_offset = gre._MIN_LEN
    checksum = None
    key = None
    seq_number = None
    # Optional fields appear in flag order: checksum, key, sequence number;
    # the running offset is advanced past each field that is present.
    if present & GRE_CHECKSUM_FLG:
        checksum, = struct.unpack_from(cls._CHECKSUM_PACK_STR,
                                       buf, gre_offset)
        gre_offset += cls._CHECKSUM_LEN
    if present & GRE_KEY_FLG:
        key, = struct.unpack_from(cls._KEY_PACK_STR, buf, gre_offset)
        gre_offset += cls._KEY_LEN
    if present & GRE_SEQUENCE_NUM_FLG:
        seq_number, = struct.unpack_from(cls._SEQNUM_PACK_STR,
                                         buf, gre_offset)
        gre_offset += cls._SEQNUM_PACK_LEN
    # vsid/flow_id need not be passed: the constructor derives them from key.
    msg = cls(version=version, protocol=protocol, checksum=checksum,
              key=key, seq_number=seq_number)
    # Imported here (not at module top) to avoid a circular import with
    # the ethernet module; registration makes TEB payloads parseable.
    from . import ethernet
    gre._TYPES = ethernet.ethernet._TYPES
    gre.register_packet_type(ethernet.ethernet,
                             ether_types.ETH_TYPE_TEB)
    # Returns (parsed header, parser class for the payload, remaining bytes).
    return msg, gre.get_packet_type(protocol), buf[gre_offset:]
def serialize(self, payload=None, prev=None):
    """Encode the GRE header.

    An optional field is emitted for every attribute that is not None;
    when a checksum is requested it is (re)computed over the serialized
    header before being packed in place.
    """
    present = 0
    hdr = bytearray()
    optional = bytearray()
    if self.checksum is not None:
        present |= GRE_CHECKSUM_FLG
        # For purposes of computing the checksum,
        # the value of the checksum field is zero.
        # Also, because Reserved1 is always 0x00 of 2 bytes,
        # Set in conjunction with checksum.
        optional += b'\x00' * self._CHECKSUM_LEN
    if self._key is not None:
        present |= GRE_KEY_FLG
        optional += struct.pack(self._KEY_PACK_STR, self._key)
    if self.seq_number is not None:
        present |= GRE_SEQUENCE_NUM_FLG
        optional += struct.pack(self._SEQNUM_PACK_STR, self.seq_number)
    msg_pack_into(self._PACK_STR, hdr, 0, present, self.version,
                  self.protocol)
    hdr += optional
    # BUG FIX: this test was `if self.checksum:` while the flag above was
    # set with `is not None`.  With checksum=0 (the usual "please compute"
    # sentinel) the C bit was set and space reserved, but the checksum was
    # never computed, emitting an invalid all-zero checksum.  Use the same
    # `is not None` test so flag and fill-in always agree.
    if self.checksum is not None:
        self.checksum = packet_utils.checksum(hdr)
        struct.pack_into(self._CHECKSUM_PACK_STR, hdr, self._MIN_LEN,
                         self.checksum)
    return hdr
def nvgre(version=0, vsid=0, flow_id=0):
    """
    Generate instance of GRE class with information for NVGRE (RFC7637).

    :param version: Version.
    :param vsid: Virtual Subnet ID.
    :param flow_id: FlowID.
    :return: Instance of GRE class with information for NVGRE.
    """
    # NVGRE fixes the protocol type to Transparent Ethernet Bridging
    # (0x6558) and always carries the Key field, split into a 24-bit
    # VSID plus an 8-bit FlowID:
    #
    # |0| |1|0| Reserved0 | Ver |  Protocol Type 0x6558  |
    # |      Virtual Subnet ID (VSID)      |   FlowID    |
    return gre(version=version,
               protocol=ether_types.ETH_TYPE_TEB,
               vsid=vsid,
               flow_id=flow_id)
| apache-2.0 |
nik-hil/ODL-REST-PythonSDK | ODL/restapi.py | 1 | 3597 | import requests
import json
import logging
from requests.auth import HTTPBasicAuth
#TODO: implement logger
class RestAPI():
    """Thin convenience wrapper around the ``requests`` library for
    talking to an OpenDaylight-style REST API.

    Instances hold connection defaults (address, port, credentials) and
    expose helpers to build URLs and dispatch HTTP requests.
    """

    def __init__(self, **kwargs):
        """Create a client; keyword arguments override the defaults.

        Example::

            o = RestAPI(ipaddress='1.1.1.1', port='80')
        """
        # Connection defaults; any of these may be overridden via kwargs.
        self.ipaddress = '127.0.0.1'
        self.port = '8080'
        self.username = 'admin'
        self.password = 'admin'
        self.http = 'http'
        if kwargs:
            # BUG FIX: dict.iteritems() exists only on Python 2; items()
            # behaves identically here on both Python 2 and 3.
            for k, v in kwargs.items():
                setattr(self, k, v)
        self.base_url = self.http + "://" + self.ipaddress + ":" + self.port
        self.auth = HTTPBasicAuth(self.username, self.password)

    def get_url(self, rawurl):
        """Return the absolute URL for a raw REST path.

        Returns an error string when *rawurl* is empty or not a string.
        """
        if not rawurl or not isinstance(rawurl, str):
            return "Error: rawurl is not valid"
        return self.base_url + rawurl

    def send_reqests(self, action, url, flow=None):
        """Dynamically dispatch an HTTP request.

        action : 'get', 'put', 'post' or 'delete' (case-insensitive)
        url    : complete URL the request is sent to
        flow   : optional dict payload, serialized to JSON

        >>> send_reqests('get', 'www.example.com')
        """
        request_type = None
        if isinstance(action, str) and action:
            action = action.lower()
            if action in ('get', 'put', 'post', 'delete'):
                request_type = getattr(requests, action)
        if not request_type:
            return 'Error: No action found'
        #FIXME: add try/except here along with logger
        if flow:
            headers = {'Content-type': 'application/json',
                       'Accept': 'text/plain'}
            data = json.dumps(flow)
            response = request_type(url, data=data, headers=headers,
                                    auth=self.auth)
        else:
            response = request_type(url, auth=self.auth)
        return response

    # Correctly spelled alias; the original misspelled name is kept so
    # existing callers keep working.
    send_requests = send_reqests

    def url_modifier(self, raw_url, url_params):
        """Substitute ``{placeholder}`` path segments of *raw_url* with
        values from the *url_params* dict.

        Returns an error string when a placeholder key is missing, and
        '' for an empty *raw_url*.
        """
        if raw_url:
            url_list = raw_url.split('/')
            for i, ele in enumerate(url_list):
                if '{' in ele:
                    temp = ele.replace('{', "").replace('}', "")
                    try:
                        url_list[i] = url_params[temp]
                    except KeyError:
                        return "Error: " + temp + " is not present in dictionary"
            return "/".join(url_list)
        return ""

    def get_result(self, action, raw_url, url_params, flow=None):
        """Resolve *raw_url* with *url_params*, then dispatch *action*."""
        if action and raw_url and url_params:
            url_call = self.url_modifier(raw_url.strip(), url_params)
            url = self.get_url(url_call)
            return self.send_reqests(action, url, flow)
        return "Error: one or more parameters were not valid"
if __name__ == '__main__':
    # Smoke test against a local OpenDaylight controller: fetch flow
    # statistics for the default container and dump the JSON response.
    api = RestAPI(ipaddress='192.168.56.101')
    raw_url = '/controller/nb/v2/statistics/{containerName}/flow'
    url_params = {'containerName': 'default'}
    action = 'get'
    result = api.get_result(action, raw_url, url_params)
    # BUG FIX: `print result.json()` is Python-2-only statement syntax;
    # the call form below works on both Python 2 and 3.
    print(result.json())
kcpawan/django | tests/template_tests/filter_tests/test_slice.py | 428 | 1317 | from django.template.defaultfilters import slice_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class SliceTests(SimpleTestCase):
    """Template-engine tests for the ``slice`` filter, with and without
    autoescaping."""

    @setup({'slice01': '{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}'})
    def test_slice01(self):
        context = {'a': 'a&b', 'b': mark_safe('a&b')}
        rendered = self.engine.render_to_string('slice01', context)
        self.assertEqual(rendered, '&b &b')

    @setup({'slice02': '{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}'})
    def test_slice02(self):
        context = {'a': 'a&b', 'b': mark_safe('a&b')}
        rendered = self.engine.render_to_string('slice02', context)
        self.assertEqual(rendered, '&b &b')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for ``slice_filter`` over the fixed sample
    string 'abcdefg'."""

    def _check(self, spec, expected):
        # Every case slices the same sample string with a different spec.
        self.assertEqual(slice_filter('abcdefg', spec), expected)

    def test_zero_length(self):
        self._check('0', '')

    def test_index(self):
        self._check('1', 'a')

    def test_negative_index(self):
        self._check('-1', 'abcdef')

    def test_range(self):
        self._check('1:2', 'b')

    def test_range_multiple(self):
        self._check('1:3', 'bc')

    def test_range_step(self):
        self._check('0::2', 'aceg')
| bsd-3-clause |
antoan2/incubator-mxnet | python/mxnet/gluon/parameter.py | 2 | 25058 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Neural network parameter."""
__all__ = ['DeferredInitializationError', 'Parameter', 'ParameterDict',
'tensor_types']
from collections import OrderedDict
import warnings
import numpy as np
from ..base import mx_real_t, MXNetError
from .. import symbol, ndarray, initializer, context
from ..context import Context
from .. import autograd
from .utils import _indent
# pylint: disable= invalid-name
# Types accepted wherever either a symbolic or an imperative tensor is
# expected by the Gluon API.
tensor_types = (symbol.Symbol, ndarray.NDArray)
# pylint: enable= invalid-name


class DeferredInitializationError(MXNetError):
    """Error for unfinished deferred initialization.

    Raised when a Parameter's data is requested while its actual
    initialization is still deferred (i.e. its shape is not yet known).
    """
    pass
class Parameter(object):
    """A Container holding parameters (weights) of Blocks.

    :py:class:`Parameter` holds a copy of the parameter on each :py:class:`Context` after
    it is initialized with ``Parameter.initialize(...)``. If :py:attr:`grad_req` is
    not ``'null'``, it will also hold a gradient array on each :py:class:`Context`::

        ctx = mx.gpu(0)
        x = mx.nd.zeros((16, 100), ctx=ctx)
        w = mx.gluon.Parameter('fc_weight', shape=(64, 100), init=mx.init.Xavier())
        b = mx.gluon.Parameter('fc_bias', shape=(64,), init=mx.init.Zero())
        w.initialize(ctx=ctx)
        b.initialize(ctx=ctx)
        out = mx.nd.FullyConnected(x, w.data(ctx), b.data(ctx), num_hidden=64)

    Parameters
    ----------
    name : str
        Name of this parameter.
    grad_req : {'write', 'add', 'null'}, default 'write'
        Specifies how to update gradient to grad arrays.

        - ``'write'`` means everytime gradient is written to grad :py:class:`NDArray`.
        - ``'add'`` means everytime gradient is added to the grad :py:class:`NDArray`. You need
          to manually call ``zero_grad()`` to clear the gradient buffer before each
          iteration when using this option.
        - 'null' means gradient is not requested for this parameter. gradient arrays
          will not be allocated.
    shape : tuple of int, default None
        Shape of this parameter. By default shape is not specified. Parameter with
        unknown shape can be used for :py:class:`Symbol` API, but ``init`` will throw an error
        when using :py:class:`NDArray` API.
    dtype : numpy.dtype or str, default 'float32'
        Data type of this parameter. For example, ``numpy.float32`` or ``'float32'``.
    lr_mult : float, default 1.0
        Learning rate multiplier. Learning rate will be multiplied by lr_mult
        when updating this parameter with optimizer.
    wd_mult : float, default 1.0
        Weight decay multiplier (L2 regularizer coefficient). Works similar to lr_mult.
    init : Initializer, default None
        Initializer of this parameter. Will use the global initializer by default.

    Attributes
    ----------
    grad_req : {'write', 'add', 'null'}
        This can be set before or after initialization. Setting ``grad_req`` to ``'null'``
        with ``x.grad_req = 'null'`` saves memory and computation when you don't
        need gradient w.r.t x.
    lr_mult : float
        Local learning rate multiplier for this Parameter. The actual learning rate
        is calculated with ``learning_rate * lr_mult``. You can set it with
        ``param.lr_mult = 2.0``
    wd_mult : float
        Local weight decay multiplier for this Parameter.
    """
    def __init__(self, name, grad_req='write', shape=None, dtype=mx_real_t,
                 lr_mult=1.0, wd_mult=1.0, init=None, allow_deferred_init=False,
                 differentiable=True):
        # Internal state; _data/_grad are per-context NDArray lists once
        # initialized, and _ctx_map maps (device_typeid, device_id) -> index
        # into those lists.
        self._var = None
        self._data = None
        self._grad = None
        self._ctx_list = None
        self._ctx_map = None
        # Non-empty tuple (init, ctx, default_init) while initialization is
        # deferred pending shape inference.
        self._deferred_init = ()
        self._differentiable = differentiable
        self._allow_deferred_init = allow_deferred_init
        self._grad_req = None
        self.name = name
        self.shape = shape
        self.dtype = dtype
        self.lr_mult = lr_mult
        self.wd_mult = wd_mult
        # Assigning through the property validates the value and may
        # allocate/release gradient buffers.
        self.grad_req = grad_req
        self.init = init

    def __repr__(self):
        s = 'Parameter {name} (shape={shape}, dtype={dtype})'
        return s.format(**self.__dict__)

    @property
    def grad_req(self):
        return self._grad_req

    @grad_req.setter
    def grad_req(self, req):
        assert req in ['write', 'add', 'null'], \
            "grad_req must be one of write, add, or null, but got %s"%req
        # Non-differentiable parameters never request gradients.
        if not self._differentiable:
            req = 'null'
        if self._grad_req == req:
            return
        self._grad_req = req
        if req == 'null' and self._grad is not None:
            # Drop gradient buffers and detach data from the autograd graph.
            self._grad = None
            self._data = [i.detach() for i in self._data]
        elif self._data is not None:
            self._init_grad()

    def _check_and_get(self, arr_list, ctx):
        # Resolve the per-context array for `ctx`; the sentinel `ctx is list`
        # requests the whole list.
        if arr_list is not None:
            if ctx is list:
                return arr_list
            if ctx is None:
                if len(arr_list) == 1:
                    return arr_list[0]
                else:
                    ctx = context.current_context()
            idx = self._ctx_map[ctx.device_typeid][ctx.device_id]
            if idx is not None:
                return arr_list[idx]
            raise RuntimeError(
                "Parameter %s was not initialized on context %s. "
                "It was only initialized on %s."%(
                    self.name, str(ctx), str(self._ctx_list)))
        if self._deferred_init:
            raise DeferredInitializationError(
                "Parameter %s has not been initialized yet because initialization was " \
                "deferred. Actual initialization happens during the first forward pass. " \
                "Please pass one batch of data through the network before accessing Parameters. " \
                "You can also avoid deferred initialization by specifying in_units, " \
                "num_features, etc., for network layers."%(self.name))
        raise RuntimeError(
            "Parameter %s has not been initialized. Note that " \
            "you should initialize parameters and create Trainer " \
            "with Block.collect_params() instead of Block.params " \
            "because the later does not include Parameters of " \
            "nested child Blocks"%(self.name))

    def _load_init(self, data, ctx):
        """(Re)initializes by loading from data."""
        # NOTE(review): "expacted"/"previous initialized" in the messages
        # below are typos in runtime strings, left untouched here.
        if self.shape:
            for i, j in zip(self.shape, data.shape):
                # 0 in self.shape means "unknown dimension": accept any size.
                assert i == 0 or i == j, \
                    "Failed loading Parameter %s from saved params: " \
                    "shape incompatible expacted %s vs saved %s"%(
                        self.name, str(self.shape), str(data.shape))
        if self.dtype:
            assert np.dtype(self.dtype).type == data.dtype, \
                "Failed loading Parameter %s from saved params: " \
                "dtype incompatible expacted %s vs saved %s"%(
                    self.name, str(self.dtype), str(data.dtype))
        if isinstance(ctx, Context):
            ctx = [ctx]
        if self._data is None:
            if self._deferred_init:
                # Loading must target the same contexts chosen when the
                # deferred initialization was requested.
                assert set(ctx) == set(self._deferred_init[1]), \
                    "Failed to load Parameter %s on %s because it was " \
                    "previous initialized on %s."%(
                        self.name, str(ctx), str(self.list_ctx()))
            self._init_impl(data, ctx)
        else:
            assert set(ctx) == set(self.list_ctx()), \
                "Failed to load Parameter %s on %s because it was " \
                "previous initialized on %s."%(
                    self.name, str(ctx), str(self.list_ctx()))
            self.set_data(data)
        self._deferred_init = ()

    def _finish_deferred_init(self):
        """Finishes deferred initialization."""
        if not self._deferred_init:
            return
        init, ctx, default_init = self._deferred_init
        self._deferred_init = ()
        assert self.shape is not None and np.prod(self.shape) > 0, \
            "Cannot initialize Parameter %s because it has " \
            "invalid shape: %s. Please specify in_units, " \
            "in_channels, etc for `Block`s."%(
                self.name, str(self.shape))
        # Initialize on CPU first, then copy to the target contexts in
        # _init_impl; pause autograd so initialization is not recorded.
        with autograd.pause():
            data = ndarray.zeros(shape=self.shape, dtype=self.dtype,
                                 ctx=context.cpu())
            initializer.create(default_init)(
                initializer.InitDesc(self.name, {'__init__': init}), data)
            self._init_impl(data, ctx)

    def _init_impl(self, data, ctx_list):
        """Sets data and grad."""
        self._ctx_list = list(ctx_list)
        # Build a ragged (device_typeid, device_id) -> list-index lookup,
        # growing the nested lists on demand.
        self._ctx_map = []
        for i, ctx in enumerate(self._ctx_list):
            while len(self._ctx_map) <= ctx.device_typeid:
                self._ctx_map.append([])
            dev_list = self._ctx_map[ctx.device_typeid]
            while len(dev_list) <= ctx.device_id:
                dev_list.append(None)
            dev_list[ctx.device_id] = i
        self._data = [data.copyto(ctx) for ctx in self._ctx_list]
        self._init_grad()

    def _init_grad(self):
        """Initialize grad buffers."""
        if self.grad_req == 'null':
            self._grad = None
            return
        self._grad = [ndarray.zeros_like(i) for i in self._data]
        # Attach the gradient buffers to the data arrays for autograd.
        autograd.mark_variables(self.list_data(), self.list_grad(), self.grad_req)

    def _reduce(self):
        """Reduce data from multiple context."""
        # Average the per-context copies on CPU (they may have diverged).
        block = self.list_data()
        data = ndarray.add_n(*(w.copyto(context.cpu()) for w in block)) / len(block)
        return data

    def initialize(self, init=None, ctx=None, default_init=initializer.Uniform(),
                   force_reinit=False):
        """Initializes parameter and gradient arrays. Only used for :py:class:`NDArray` API.

        Parameters
        ----------
        init : Initializer
            The initializer to use. Overrides :py:meth:`Parameter.init` and default_init.
        ctx : Context or list of Context, defaults to :py:meth:`context.current_context()`.
            Initialize Parameter on given context. If ctx is a list of Context, a
            copy will be made for each context.

            .. note::
                Copies are independent arrays. User is responsible for keeping
                their values consistent when updating.
                Normally :py:class:`gluon.Trainer` does this for you.

        default_init : Initializer
            Default initializer is used when both :py:func:`init`
            and :py:meth:`Parameter.init` are ``None``.
        force_reinit : bool, default False
            Whether to force re-initialization if parameter is already initialized.

        Examples
        --------
        >>> weight = mx.gluon.Parameter('weight', shape=(2, 2))
        >>> weight.initialize(ctx=mx.cpu(0))
        >>> weight.data()
        [[-0.01068833  0.01729892]
         [ 0.02042518 -0.01618656]]
        <NDArray 2x2 @cpu(0)>
        >>> weight.grad()
        [[ 0.  0.]
         [ 0.  0.]]
        <NDArray 2x2 @cpu(0)>
        >>> weight.initialize(ctx=[mx.gpu(0), mx.gpu(1)])
        >>> weight.data(mx.gpu(0))
        [[-0.00873779 -0.02834515]
         [ 0.05484822 -0.06206018]]
        <NDArray 2x2 @gpu(0)>
        >>> weight.data(mx.gpu(1))
        [[-0.00873779 -0.02834515]
         [ 0.05484822 -0.06206018]]
        <NDArray 2x2 @gpu(1)>
        """
        if self._data is not None and not force_reinit:
            warnings.warn("Parameter %s is already initialized, ignoring. " \
                          "Set force_reinit=True to re-initialize."%self.name)
            return
        self._data = self._grad = None
        if ctx is None:
            ctx = [context.current_context()]
        if isinstance(ctx, Context):
            ctx = [ctx]
        if init is None:
            init = default_init if self.init is None else self.init
        if not self.shape or np.prod(self.shape) <= 0:
            # Shape still unknown: defer if allowed, otherwise fail loudly.
            if self._allow_deferred_init:
                self._deferred_init = (init, ctx, default_init)
                return
            raise ValueError("Cannot initialize Parameter %s because it has " \
                             "invalid shape: %s."%(self.name, str(self.shape)))
        # Shape is known: record the request and complete it immediately.
        self._deferred_init = (init, ctx, default_init)
        self._finish_deferred_init()

    def reset_ctx(self, ctx):
        """Re-assign Parameter to other contexts.

        ctx : Context or list of Context, default ``context.current_context()``.
            Assign Parameter to given context. If ctx is a list of Context, a
            copy will be made for each context.
        """
        if ctx is None:
            ctx = [context.current_context()]
        if isinstance(ctx, Context):
            ctx = [ctx]
        if self._data:
            # Merge the existing per-context copies, then redistribute.
            data = self._reduce()
            with autograd.pause():
                self._init_impl(data, ctx)
        elif self._deferred_init:
            init, _, default_init = self._deferred_init
            self._deferred_init = (init, ctx, default_init)
        else:
            raise ValueError("Cannot reset context for Parameter %s because it "
                             "has not been initialized."%self.name)

    def set_data(self, data):
        """Sets this parameter's value on all contexts to data."""
        assert self._data is not None, \
            "Parameter %s has not been initialized"%self.name
        for arr in self.list_data():
            arr[:] = data

    def data(self, ctx=None):
        """Returns a copy of this parameter on one context. Must have been
        initialized on this context before.

        Parameters
        ----------
        ctx : Context
            Desired context.

        Returns
        -------
        NDArray on ctx
        """
        return self._check_and_get(self._data, ctx)

    def list_data(self):
        """Returns copies of this parameter on all contexts, in the same order
        as creation."""
        return self._check_and_get(self._data, list)

    def grad(self, ctx=None):
        """Returns a gradient buffer for this parameter on one context.

        Parameters
        ----------
        ctx : Context
            Desired context.
        """
        if self._data is not None and self._grad is None:
            raise RuntimeError(
                "Cannot get gradient array for Parameter %s " \
                "because grad_req='null'"%(self.name))
        return self._check_and_get(self._grad, ctx)

    def list_grad(self):
        """Returns gradient buffers on all contexts, in the same order
        as :py:meth:`values`."""
        if self._data is not None and self._grad is None:
            raise RuntimeError(
                "Cannot get gradient array for Parameter %s " \
                "because grad_req='null'"%(self.name))
        return self._check_and_get(self._grad, list)

    def list_ctx(self):
        """Returns a list of contexts this parameter is initialized on."""
        if self._data is None:
            if self._deferred_init:
                return self._deferred_init[1]
            raise RuntimeError("Parameter %s has not been initialized"%self.name)
        return self._ctx_list

    def zero_grad(self):
        """Sets gradient buffer on all contexts to 0. No action is taken if
        parameter is uninitialized or doesn't require gradient."""
        if self._grad is None:
            return
        for i in self._grad:
            i[:] = 0

    def var(self):
        """Returns a symbol representing this parameter."""
        # Lazily created and cached; used by the Symbol API.
        if self._var is None:
            self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                                   lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                                   init=self.init)
        return self._var
class ParameterDict(object):
    """A dictionary managing a set of parameters.

    Parameters
    ----------
    prefix : str, default ``''``
        The prefix to be prepended to all Parameters' names created by this dict.
    shared : ParameterDict or None
        If not ``None``, when this dict's :py:meth:`get` method creates a new parameter, will
        first try to retrieve it from "shared" dict. Usually used for sharing
        parameters with another Block.
    """
    def __init__(self, prefix='', shared=None):
        self._prefix = prefix
        # Insertion order matters (e.g. for save/load and display).
        self._params = OrderedDict()
        self._shared = shared

    def __repr__(self):
        s = '{name}(\n{content}\n)'
        name = self._prefix+' ' if self._prefix else ''
        return s.format(name=name,
                        content='\n'.join([_indent('  {0}'.format(v), 2)
                                           for v in self.values()]))

    def __getitem__(self, key):
        return self._params[key]

    def __iter__(self):
        return iter(self._params)

    def items(self):
        return self._params.items()

    def keys(self):
        return self._params.keys()

    def values(self):
        return self._params.values()

    @property
    def prefix(self):
        """Prefix of this dict. It will be prepended to :py:class:`Parameter`s' name created
        with :py:func:`get`."""
        return self._prefix

    def _get_impl(self, name):
        # Lookup order: own params, then the shared dict (caching a hit
        # locally so both dicts reference the same Parameter object).
        if name in self._params:
            return self._params[name]
        if self._shared is not None and name in self._shared._params:
            self._params[name] = self._shared._params[name]
            return self._shared._params[name]
        return None

    def get(self, name, **kwargs):
        """Retrieves a :py:class:`Parameter` with name ``self.prefix+name``. If not found,
        :py:func:`get` will first try to retrieve it from "shared" dict. If still not
        found, :py:func:`get` will create a new :py:class:`Parameter` with key-word arguments and
        insert it to self.

        Parameters
        ----------
        name : str
            Name of the desired Parameter. It will be prepended with this dictionary's
            prefix.
        **kwargs : dict
            The rest of key-word arguments for the created :py:class:`Parameter`.

        Returns
        -------
        Parameter
            The created or retrieved :py:class:`Parameter`.
        """
        name = self.prefix + name
        param = self._get_impl(name)
        if param is None:
            param = Parameter(name, **kwargs)
            self._params[name] = param
        else:
            # Re-retrieving an existing Parameter: requested attributes must
            # either be unset on it, or match what is already stored.
            for k, v in kwargs.items():
                if hasattr(param, k) and getattr(param, k) is not None:
                    assert v is None or v == getattr(param, k), \
                        "Cannot retrieve Parameter %s because desired attribute " \
                        "does not match with stored for attribute %s: " \
                        "desired %s vs stored %s."%(
                            name, k, str(v), str(getattr(param, k)))
                else:
                    setattr(param, k, v)
        return param

    def update(self, other):
        """Copies all Parameters in ``other`` to self."""
        for k, v in other.items():
            if k in self._params:
                # Identity check: a same-named but distinct Parameter would
                # silently shadow state, so it is rejected.
                assert self._params[k] is v, \
                    "Cannot update self with other because they have different " \
                    "Parameters with the same name %s"%k
            else:
                self._params[k] = v

    def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False,
                   force_reinit=False):
        """Initializes all Parameters managed by this dictionary to be used for :py:class:`NDArray`
        API. It has no effect when using :py:class:`Symbol` API.

        Parameters
        ----------
        init : Initializer
            Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.
            Otherwise, :py:meth:`Parameter.init` takes precedence.
        ctx : Context or list of Context
            Keeps a copy of Parameters on one or many context(s).
        force_reinit : bool, default False
            Whether to force re-initialization if parameter is already initialized.
        """
        if verbose:
            init.set_verbosity(verbose=verbose)
        for _, v in self.items():
            v.initialize(None, ctx, init, force_reinit=force_reinit)

    def zero_grad(self):
        """Sets all Parameters' gradient buffer to 0."""
        for i in self.values():
            i.zero_grad()

    def reset_ctx(self, ctx):
        """Re-assign all Parameters to other contexts.

        ctx : Context or list of Context, default :py:meth:`context.current_context()`.
            Assign Parameter to given context. If ctx is a list of Context, a
            copy will be made for each context.
        """
        for i in self.values():
            i.reset_ctx(ctx)

    def setattr(self, name, value):
        """Set an attribute to a new value for all Parameters.

        For example, set grad_req to null if you don't need gradient w.r.t a
        model's Parameters::

            model.collect_params().setattr('grad_req', 'null')

        or change the learning rate multiplier::

            model.collect_params().setattr('lr_mult', 0.5)

        Parameters
        ----------
        name : str
            Name of the attribute.
        value : valid type for attribute name
            The new value for the attribute.
        """
        for i in self.values():
            setattr(i, name, value)

    def save(self, filename, strip_prefix=''):
        """Save parameters to file.

        filename : str
            Path to parameter file.
        strip_prefix : str, default ''
            Strip prefix from parameter names before saving.
        """
        arg_dict = {}
        for param in self.values():
            # Multi-context parameters are averaged to a single CPU copy.
            weight = param._reduce()
            if not param.name.startswith(strip_prefix):
                raise ValueError(
                    "Prefix %s is to be striped before saving, but Parameter "
                    "%s does not start with %s. If you are using Block.save_params, "
                    "This may be due to your Block shares parameters from other "
                    "Blocks or you forgot to use ``with name_scope()`` during init. "
                    "Consider switching to Block.collect_params.save and "
                    "Block.collect_params.load instead."%(
                        strip_prefix, param.name, strip_prefix))
            arg_dict[param.name[len(strip_prefix):]] = weight
        ndarray.save(filename, arg_dict)

    def load(self, filename, ctx, allow_missing=False,
             ignore_extra=False, restore_prefix=''):
        """Load parameters from file.

        filename : str
            Path to parameter file.
        ctx : Context or list of Context
            Context(s) initialize loaded parameters on.
        allow_missing : bool, default False
            Whether to silently skip loading parameters not represents in the file.
        ignore_extra : bool, default False
            Whether to silently ignore parameters from the file that are not
            present in this ParameterDict.
        restore_prefix : str, default ''
            prepend prefix to names of stored parameters before loading.
        """
        if restore_prefix:
            for name in self.keys():
                assert name.startswith(restore_prefix), \
                    "restore_prefix is %s but Parameters name %s does not start " \
                    "with %s"%(restore_prefix, name, restore_prefix)
        lprefix = len(restore_prefix)
        # Re-key the stored arrays with the prefix used by this dict.
        arg_dict = {restore_prefix+k: v for k, v in ndarray.load(filename).items()}
        if not allow_missing:
            for name in self.keys():
                assert name in arg_dict, \
                    "Parameter %s is missing in file %s"%(name[lprefix:], filename)
        for name in arg_dict:
            if name not in self._params:
                assert ignore_extra, \
                    "Parameter %s loaded from file %s is not present in ParameterDict"%(
                        name[lprefix:], filename)
                continue
            self[name]._load_init(arg_dict[name], ctx)
| apache-2.0 |
Ninad998/deepstylometry-python | fyp/StyloNeuralML.py | 1 | 3975 | #!/usr/bin/python
# -*- coding: utf-8 -*-
def getResults(authorList = None, doc_id = None, algo = None, chunk_size = 1000, level = 'word',
               glove = '../glove/', samples = 300, dimensions = 200, dropout = 0.5):
    # Trains a CNN feature extractor plus a classic-ML classifier (selected by
    # `algo`) on the given authors' documents, excluding `doc_id` from the
    # training pool; returns (labels_index, train_acc, val_acc, samples) or
    # None when the required arguments are missing.
    if (authorList is None) or (doc_id is None) or (algo is None) or (doc_id == 0):
        return None
    else:
        import CNNModelCreatorWordML as md
        embedfile = 'glove.6B.' + str(dimensions) + 'd.txt'
        embeddings_index = md.readVectorData(embedfile, GLOVE_DIR=glove)
        (texts, labels, labels_index, samples) = md.loadAuthData(authorList, doc_id, chunk_size = chunk_size, samples = samples)
        # NOTE(review): this first preProcessTrainVal call's results are
        # overwritten below -- confirm it is kept only for a side effect
        # (e.g. fitting the tokenizer).
        (trainX, trainY, valX, valY) = md.preProcessTrainVal(texts, labels, chunk_size = chunk_size)
        embedding_matrix = None
        if level == 'word':
            embedding_matrix = md.prepareEmbeddingMatrix(embeddings_index, EMBEDDING_DIM = dimensions)
        # NOTE(review): when level != 'word', embedding_matrix is still None
        # here yet is passed to recompileModelCNN -- confirm that is intended.
        feature_model = md.recompileModelCNN(len(labels_index), embedding_matrix, chunk_size = chunk_size,
                                             DROP_OUT = dropout, EMBEDDING_DIM = dimensions)
        mlmodel = md.compileModelML(algo, new = True)
        (trainX, trainY, valX, valY) = md.preProcessTrainVal(texts, labels, ml = True, chunk_size = chunk_size)
        # Free the raw corpus before training to reduce peak memory.
        del texts, labels
        (train_acc, val_acc) = md.fitModelML(feature_model, mlmodel, algo, trainX, trainY, valX, valY)
        del feature_model
        return (labels_index, train_acc, val_acc, samples)
def getTestResults(authorList = None, doc_id = None, labels_index = None, algo = None, chunk_size = 1000, level = 'word',
                   glove = '../glove/', dimensions = 200, dropout = 0.5, predYList_cnn = None):
    # Evaluates the previously trained CNN+ML pipeline on document `doc_id`,
    # combining per-chunk predictions with `predYList_cnn` and applying
    # entropy-based filtering; returns (predYList, predY, testY, predEntro).
    if (authorList is None) or (labels_index is None) or (doc_id is None) or (doc_id == 0):
        return None
    else:
        if level == 'char':
            import CNNModelCreatorChar as md
        else:
            import CNNModelCreatorWordML as md
        embedfile = 'glove.6B.' + str(dimensions) + 'd.txt'
        embeddings_index = md.readVectorData(embedfile, GLOVE_DIR=glove)
        md.makeTokenizer()
        (testX, testY) = md.loadDocData(authorList, doc_id, chunk_size = chunk_size)
        (testX, testY) = md.preProcessTest(testX, labels_index, testY, chunk_size = chunk_size)
        embedding_matrix = None
        # NOTE(review): unlike getResults, the embedding matrix is built
        # unconditionally here, even for level == 'char' -- confirm
        # CNNModelCreatorChar provides a compatible prepareEmbeddingMatrix.
        embedding_matrix = md.prepareEmbeddingMatrix(embeddings_index, EMBEDDING_DIM = dimensions)
        feature_model = md.recompileModelCNN(len(labels_index), embedding_matrix, chunk_size = chunk_size,
                                             DROP_OUT = dropout, EMBEDDING_DIM = dimensions)
        # new=False reloads the classifier fitted during getResults.
        mlmodel = md.compileModelML(algo, new = False)
        import numpy as np
        # Collapse the per-chunk one-hot labels to a single label vector.
        testY = np.mean(testY, axis=0, dtype=int)
        (predYList, predY) = md.predictModel(feature_model, mlmodel, testX, authorList)
        del feature_model
        newPredList = []
        # Pool this model's chunk predictions with the CNN-only ones.
        newPredList = np.concatenate((predYList_cnn, predYList))
        # entropy() returns (copied list, entropy-filtered mean prediction);
        # only the filtered mean (predEntro) is used below.
        (predList, predEntro) = entropy(newPredList, cutoff = 0.5)
        return (predYList, predY, testY, predEntro)
def entropy(predList, cutoff = 0.5):
    """Average the most confident predictions, ranked by Shannon entropy.

    Parameters
    ----------
    predList : sequence of per-chunk probability distributions.
    cutoff : float in (0, 1], fraction of lowest-entropy rows to average.

    Returns
    -------
    (predYList, predY) : a shallow copy of the input and the mean
    distribution of the kept rows.  If any probability is <= 0 the
    entropy ranking is skipped and all rows are averaged instead
    (log2 is undefined there).
    """
    import math
    import numpy as np

    predYList = predList[:]
    entro = []
    nonpositive = False
    for row in predList:
        h = 0.0
        for p in row:
            if p <= 0:
                # log2 undefined for non-positive values; remember and
                # fall back to a plain mean over all rows.
                nonpositive = True
            else:
                h += p * math.log(p, 2)
        entro.append(-h)
    if not nonpositive:
        # Sort rows by ascending entropy (most confident first).
        ranked = sorted(zip(entro, predList), key=lambda t: t[0])
        ordered = [row for _, row in ranked]
        # BUG FIX: keep at least one row; previously int(len * cutoff)
        # could be 0 for short lists, making np.mean([]) return NaN.
        keep = max(1, int(len(ordered) * cutoff))
        predY = np.mean(ordered[:keep], axis=0)
    else:
        predY = np.mean(predYList, axis=0)
    return (predYList, predY)
| mit |
0asa/scikit-learn | sklearn/utils/validation.py | 2 | 17489 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator
from inspect import getargspec
class DataConversionWarning(UserWarning):
    "A warning on implicit data conversions happening in the code"
    pass


# Always show data-conversion warnings so silent dtype/shape coercions
# are visible to users by default.
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
    "A warning on implicit dispatch to numpy.dot"
    # Pure warning category; no behavior beyond the docstring.
class NotFittedError(ValueError, AttributeError):
    """Exception class to raise if estimator is used before fitting

    This class inherits from both ValueError and AttributeError to help with
    exception handling and backward compatibility.
    """


# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # Sparse matrices carry their stored values in .data; dense arrays
    # are checked directly.
    values = X.data if sp.issparse(X) else X
    _assert_all_finite(values)
def as_float_array(X, copy=True, force_all_finite=True):
    """Convert an array-like into an array of floats.

    The resulting dtype is np.float32 or np.float64, depending on the
    original dtype.  The function can create a copy or modify the argument
    depending on the argument copy.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : bool, optional
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    # np.matrix instances and generic array-likes go through the full
    # validation machinery, which also performs the float conversion.
    if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
                                    and not sp.issparse(X)):
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)

    if X.dtype in [np.float32, np.float64]:
        # Already floating point: at most a copy is required.
        if sp.issparse(X):
            return X.copy() if copy else X
        if not copy:
            return X
        # Preserve the memory layout of the original array.
        return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')

    # Integer (or other) input: promote to the matching float width.
    return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length;
    None entries are ignored.

    Parameters
    ----------
    arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    sample_counts = [_num_samples(X) for X in arrays if X is not None]
    distinct = np.unique(sample_counts)
    if distinct.size > 1:
        raise ValueError("Found arrays with inconsistent numbers of samples: %s"
                         % str(distinct))
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed: sparse matrices are converted to csr and
    non-iterable objects to arrays.

    Parameters
    ----------
    iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for X in iterables:
        # Sparse input must be checked first: sparse matrices also expose
        # __getitem__ but need conversion to csr for row indexing.
        if sp.issparse(X):
            converted = X.tocsr()
        elif X is None or hasattr(X, "__getitem__") or hasattr(X, "iloc"):
            converted = X
        else:
            converted = np.array(X)
        result.append(converted)
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
        matrix input will raise an error. If the input is sparse but not in
        the allowed format, it will be converted to the first listed format.

    dtype : string, type or None (default=None)
        Data type of result. If None, the dtype of the input is preserved.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if accept_sparse is None:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    sparse_type = spmatrix.format
    if dtype is None:
        dtype = spmatrix.dtype
    if sparse_type in accept_sparse:
        # Format already allowed: only dtype and copy handling remain.
        if dtype == spmatrix.dtype:
            # Same dtype, so conversion will not implicitly copy; honor the
            # caller's copy request explicitly.
            if copy:
                spmatrix = spmatrix.copy()
        else:
            # astype always returns a new matrix, so no extra copy is needed.
            spmatrix = spmatrix.astype(dtype)
    else:
        # Disallowed format: convert to the first accepted format (and the
        # requested dtype) in one pass.
        spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
    if force_all_finite:
        # Formats without a flat .data buffer (e.g. dok) cannot be scanned
        # cheaply for non-finite values, so we only warn.
        if not hasattr(spmatrix, "data"):
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    if hasattr(spmatrix, "data"):
        # Enforce the requested memory order on the underlying data buffer
        # without copying when possible.
        spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
    return spmatrix
def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,
                force_all_finite=True, ensure_2d=True, allow_nd=False):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2d numpy array.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type or None (default=None)
        Data type of result. If None, the dtype of the input is preserved.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # Normalize a single format string to the list form used downstream.
    if isinstance(accept_sparse, str):
        accept_sparse = [accept_sparse]
    if sp.issparse(array):
        # Sparse input: format/dtype/finite checks are delegated.
        array = _ensure_sparse_format(array, accept_sparse, dtype, order,
                                      copy, force_all_finite)
    else:
        if ensure_2d:
            array = np.atleast_2d(array)
        array = np.array(array, dtype=dtype, order=order, copy=copy)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. Expected <= 2" %
                             array.ndim)
        if force_all_finite:
            _assert_all_finite(array)
    return array
def check_X_y(X, y, accept_sparse=None, dtype=None, order=None, copy=False,
              force_all_finite=True, ensure_2d=True, allow_nd=False,
              multi_output=False):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y. For multi-label y,
    set multi_output=True to allow 2d and sparse y.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, list of string or None (default=None)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. None means that sparse matrix input will raise an error.
        If the input is sparse but not in the allowed format, it will be
        converted to the first listed format.

    dtype : string, type or None (default=None)
        Data type of result. If None, the dtype of the input is preserved.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector.

    Returns
    -------
    X_converted : object
        The converted and validated X.

    y_converted : object
        The converted and validated y.
    """
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd)
    if multi_output:
        # NOTE(review): force_all_finite is hard-coded to True for y here
        # instead of forwarding the caller's argument -- confirm intended.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False)
    else:
        # Single-output y must be a vector; warn on column vectors.
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    check_consistent_length(X, y)
    return X, y
def column_or_1d(y, warn=False):
    """Ravel a column vector or 1d numpy array, else raise an error.

    Parameters
    ----------
    y : array-like

    warn : bool, optional
        When True, emit a DataConversionWarning for (n, 1) column input.

    Returns
    -------
    y : array
    """
    shape = np.shape(y)
    if len(shape) == 2 and shape[1] == 1:
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    if len(shape) == 1:
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(shape))
def warn_if_not_float(X, estimator='This algorithm'):
    """Warn when the data type of *X* is not floating point.

    Returns True if a warning was raised (i.e. the input is not float) and
    False otherwise, for easier input validation.
    """
    # Accept either an estimator instance or a plain name string.
    if not isinstance(estimator, six.string_types):
        estimator = estimator.__class__.__name__
    if X.dtype.kind == 'f':
        return False
    warnings.warn("%s assumes floating point values as input, "
                  "got %s" % (estimator, X.dtype))
    return True
def check_random_state(seed):
    """Turn *seed* into a np.random.RandomState instance.

    If seed is None, return the RandomState singleton used by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # Reuse the global singleton behind the np.random.* functions.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's fit method supports the given parameter.

    Example
    -------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    # getargspec()[0] is the list of positional argument names of fit().
    fit_arg_names = getargspec(estimator.fit)[0]
    return parameter in fit_arg_names
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.

    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.

    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.

    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))

    if sp.issparse(array):
        diff = array - array.T
        # Only csr, csc, and coo expose a flat `data` attribute.
        if diff.format not in ('csr', 'csc', 'coo'):
            diff = diff.tocsr()
        symmetric = np.all(abs(diff.data) < tol)
    else:
        symmetric = np.allclose(array, array.T, atol=tol)

    if symmetric:
        return array

    if raise_exception:
        raise ValueError("Array must be symmetric")
    if raise_warning:
        warnings.warn("Array is not symmetric, and will be converted "
                      "to symmetric by average with its transpose.")

    symmetrized = 0.5 * (array + array.T)
    if sp.issparse(array):
        # Convert back to the format of the original input.
        symmetrized = getattr(symmetrized, 'to' + array.format)()
    return symmetrized
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with
    the given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.

    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg. : ["coef_", "estimator_", ...], "coef_"

    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".

    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")

    # Anything without a fit method cannot be an estimator at all.
    if not hasattr(estimator, 'fit'):
        raise ValueError("%s is not an estimator instance." % (estimator))

    wanted = attributes if isinstance(attributes, (list, tuple)) \
        else [attributes]
    present = [hasattr(estimator, attr) for attr in wanted]
    if not all_or_any(present):
        raise NotFittedError(msg % {'name' : type(estimator).__name__})
| bsd-3-clause |
imiolek-ireneusz/pysiogame | i18n/custom/word_lists/ru_di.py | 1 | 21101 | # -*- coding: utf-8 -*-
# This is a list of words used by the word builder and word maze games and possibly
# other games built in the future.
# These are mainly the most commonly used words in Russian
# In each sub-list in the list di first number is a number of words in the sublist
# to avoid counting it every time list is selected
# The sublists are consisting of words with len() of 3 - 10
# They are appropriate for under 10 years old children
# To find duplicates: sed -e "s/'/\n/g" ./ru_di.py | sort | uniq -d
# All words were adapted according to their espeak pronunciation.
# List was made by Alexey Loginov
di = [
[67, 'акт', 'без', 'бор', 'был', 'ваш', 'век', 'вес', 'вид', 'все', 'газ', 'два', 'дно', 'дом', 'дух', 'дым', 'ель',
'ещё', 'жир', 'или', 'имя', 'йод', 'как', 'кит', 'кот', 'кто', 'лес', 'лов', 'лук', 'лёд', 'меч', 'мир', 'мог',
'мэр', 'мёд', 'нос', 'оба', 'под', 'пух', 'рад', 'раз', 'рис', 'рок', 'рот', 'ряд', 'сад', 'сам', 'сок', 'сон',
'сто', 'суп', 'сын', 'сэр', 'тех', 'ток', 'тон', 'три', 'уши', 'чай', 'чей', 'чем', 'чиж', 'шаг', 'шар', 'шип',
'шоу', 'это', 'яма'],
[164, 'аист', 'база', 'банк', 'бить', 'блеф', 'блик', 'блин', 'борщ', 'брат', 'бриз', 'брус', 'буря', 'быть',
'вера', 'вещь', 'визг', 'винт', 'вниз', 'воля', 'воск', 'врач', 'выше', 'глюк', 'годы', 'горе', 'горы', 'граф',
'гриб', 'гром', 'даль', 'дать', 'дача', 'двор', 'день', 'дети', 'диво', 'диск', 'доля', 'дочь', 'драп', 'друг',
'дюйм', 'дядя', 'енот', 'жаба', 'звон', 'звук', 'змей', 'знак', 'зона', 'зонд', 'зубы', 'игра', 'идея', 'изба',
'кедр', 'клей', 'клик', 'клип', 'клоп', 'клуб', 'ключ', 'кожа', 'корм', 'кофе', 'краб', 'край', 'кран', 'крах',
'крик', 'круг', 'курс', 'лапа', 'леди', 'лето', 'лист', 'люди', 'мама', 'марш', 'матч', 'мать', 'медь', 'мель',
'мера', 'мисс', 'мозг', 'моль', 'море', 'мост', 'мрак', 'муха', 'мыть', 'мясо', 'небо', 'нить', 'ноги', 'ноль',
'ночь', 'обед', 'ожог', 'отец', 'офис', 'пара', 'парк', 'петь', 'печь', 'писк', 'плен', 'плуг', 'поле', 'приз',
'путь', 'пять', 'рама', 'репа', 'речь', 'ритм', 'роза', 'ромб', 'рост', 'рыба', 'рысь', 'сани', 'свет', 'свой',
'семь', 'семя', 'сено', 'сила', 'след', 'снег', 'соус', 'спад', 'стих', 'стон', 'стук', 'тело', 'торф', 'трюк',
'тёща', 'ужин', 'урок', 'утро', 'фото', 'хлеб', 'храм', 'храп', 'хром', 'царь', 'цвет', 'цепь', 'шанс', 'шарф',
'шест', 'шина', 'шкаф', 'шлем', 'шрам', 'щель', 'этап', 'юбка', 'язык', 'яйца', 'ёжик'],
[230, 'абзац', 'автор', 'актёр', 'аллея', 'банка', 'барин', 'бирка', 'блеск', 'бочка', 'брюки', 'будка', 'буквы',
'букет', 'вальс', 'вдруг', 'ветви', 'ветвь', 'ветер', 'вечер', 'взрыв', 'взять', 'время', 'вырос', 'гайка',
'гений', 'глина', 'грамм', 'грант', 'гриль', 'грипп', 'груша', 'губки', 'дверь', 'дикий', 'дождь', 'драка',
'дрейф', 'дымок', 'дырка', 'живая', 'жизнь', 'завод', 'залог', 'запах', 'зверь', 'здесь', 'зебра', 'знали',
'знать', 'игрок', 'икона', 'иметь', 'карта', 'кисть', 'клерк', 'клоун', 'книга', 'ковёр', 'койка', 'комод',
'комок', 'конец', 'копьё', 'кости', 'кошка', 'крест', 'крыса', 'кудри', 'кусок', 'лапти', 'левый', 'лезть',
'леска', 'леший', 'лидер', 'линзы', 'лисий', 'лодка', 'ложка', 'лоток', 'лысый', 'мазок', 'майка', 'манок',
'мачта', 'между', 'мерка', 'место', 'много', 'может', 'морда', 'мошка', 'мусор', 'мышцы', 'назад', 'народ',
'наряд', 'нитка', 'новый', 'носок', 'нужно', 'обида', 'образ', 'обувь', 'ожоги', 'океан', 'осень', 'осина',
'осока', 'осётр', 'ответ', 'отчёт', 'охота', 'очень', 'пакет', 'парик', 'парта', 'парус', 'паста', 'пашня',
'песня', 'песок', 'пирог', 'племя', 'плечи', 'плоть', 'побег', 'повар', 'полюс', 'полёт', 'после', 'посох',
'поток', 'почта', 'поэма', 'право', 'пряжа', 'птица', 'птицы', 'пункт', 'пусто', 'пусть', 'пятна', 'радио',
'разум', 'рейка', 'речка', 'ротор', 'ручка', 'рядом', 'ряска', 'садик', 'салют', 'самый', 'сахар', 'свист',
'север', 'семьи', 'серый', 'сетка', 'синий', 'синяк', 'сироп', 'скоро', 'скраб', 'скунс', 'слива', 'слизь',
'слово', 'смесь', 'снова', 'совет', 'совок', 'соска', 'спирт', 'спица', 'спорт', 'спрей', 'ссора', 'стадо',
'сталь', 'стать', 'стены', 'стиль', 'страх', 'судно', 'сумка', 'сумма', 'сфера', 'тапки', 'тесто', 'тиски',
'точка', 'точно', 'треск', 'трубы', 'тыква', 'тёзка', 'укроп', 'умный', 'учёба', 'ферма', 'фильм', 'фирма',
'форма', 'фраза', 'фронт', 'фунты', 'хвост', 'хомяк', 'цапля', 'центр', 'центы', 'цифра', 'часто', 'часть',
'через', 'чудак', 'шашки', 'шесть', 'шляпа', 'шмель', 'шприц', 'шуруп', 'шёпот', 'щенок', 'щётка', 'якорь',
'яркий', 'ясный'],
[240, 'артист', 'банный', 'барсук', 'бедный', 'бедняк', 'берёза', 'беседа', 'бизнес', 'блузка', 'болото', 'больше',
'бронза', 'бросок', 'бумага', 'бумаги', 'быстро', 'важный', 'валюта', 'варвар', 'взгляд', 'вместе', 'вместо',
'воздух', 'войско', 'ворона', 'восемь', 'восток', 'вязать', 'газета', 'гвоздь', 'гибкий', 'глазок', 'глоток',
'голубь', 'гордый', 'горшок', 'градус', 'гранит', 'громко', 'грубый', 'группа', 'деньги', 'детали', 'дефект',
'добыча', 'доктор', 'долина', 'доллар', 'дорога', 'досада', 'другие', 'дружба', 'дурное', 'желать', 'железо',
'житель', 'жёлтый', 'забота', 'завтра', 'запасы', 'запись', 'захват', 'защита', 'звонок', 'звёзды', 'играть',
'калина', 'камень', 'карета', 'качели', 'кирпич', 'клапан', 'клетки', 'клубок', 'кнопка', 'кольца', 'колёса',
'копать', 'копыто', 'корень', 'корова', 'коровы', 'корона', 'корыто', 'костёр', 'краски', 'кресло', 'кризис',
'крошка', 'кружка', 'крылья', 'кузина', 'кузнец', 'купать', 'купить', 'куплет', 'куртка', 'лавина', 'лагерь',
'лежать', 'летать', 'липкий', 'листок', 'ловкий', 'люстра', 'лёгкий', 'малина', 'машина', 'меньше', 'мимоза',
'минуты', 'момент', 'мрачно', 'мягкий', 'мятный', 'насчёт', 'неделя', 'нижний', 'низкий', 'объект', 'обычно',
'огород', 'огурец', 'одежда', 'одетый', 'однако', 'оливки', 'опушка', 'оружие', 'особый', 'остров', 'острый',
'охрана', 'оценка', 'палить', 'пальма', 'пальцы', 'парный', 'первый', 'печать', 'печень', 'пещера', 'пикник',
'плакат', 'планка', 'платок', 'платье', 'плитка', 'победа', 'погода', 'подряд', 'помочь', 'правда', 'пресса',
'проект', 'прокат', 'просто', 'против', 'пугать', 'работа', 'радуга', 'ранний', 'растёт', 'реветь', 'рельсы',
'ремонт', 'решить', 'родник', 'рулить', 'ручеёк', 'рябина', 'свежий', 'сверху', 'свиньи', 'свитор', 'свёкла',
'секрет', 'сельдь', 'сжатый', 'сидеть', 'скобки', 'скупка', 'слепые', 'сливки', 'собака', 'совать', 'солдат',
'солома', 'списки', 'старик', 'старый', 'стойло', 'стоять', 'стремя', 'стёжка', 'сугроб', 'сундук', 'только',
'тонкий', 'тополь', 'тормоз', 'травка', 'тренер', 'тролль', 'трудно', 'тряпка', 'тушить', 'тёмный', 'тёплый',
'удалой', 'указка', 'улитка', 'улыбка', 'умение', 'урвать', 'утечка', 'учёные', 'уютный', 'фанера', 'фигура',
'фильтр', 'флейта', 'формат', 'фрукты', 'хлопок', 'ходули', 'хозяин', 'цветок', 'ценный', 'цунами', 'червяк',
'четыре', 'чистый', 'читать', 'чихать', 'чёрный', 'штопор', 'щёлочь', 'эффект', 'юннаты'],
[227, 'адвокат', 'актриса', 'аппетит', 'асфальт', 'белизна', 'берлога', 'блокнот', 'богатый', 'большие', 'ботинки',
'бранить', 'бригада', 'бродяга', 'бросать', 'бутылка', 'буханка', 'быстрый', 'варенье', 'василёк', 'ведущий',
'великий', 'веранда', 'верёвка', 'веселье', 'возврат', 'вопросы', 'всплеск', 'вспышка', 'высокий', 'главный',
'гладкий', 'горячих', 'готовых', 'грозный', 'депутат', 'деревня', 'деревья', 'держава', 'держать', 'детство',
'длинный', 'дневник', 'доверие', 'должник', 'дорожка', 'древний', 'дружина', 'духовка', 'дымоход', 'дыхание',
'желание', 'женатый', 'жужжать', 'завиток', 'загадка', 'закрыть', 'закуска', 'записка', 'записки', 'заслуга',
'зелёный', 'зелёных', 'зритель', 'империя', 'инвалид', 'инсульт', 'искрить', 'история', 'кабинет', 'картина',
'кассета', 'квадрат', 'кенгуру', 'кивнуть', 'кипяток', 'колонка', 'команда', 'конечно', 'конфета', 'копейка',
'корзина', 'коробка', 'котлета', 'который', 'красный', 'кровать', 'круглый', 'крупный', 'курсант', 'легенда',
'леденец', 'лечение', 'лиловый', 'личинка', 'ловушка', 'лопатка', 'лягушка', 'маршрут', 'масштаб', 'материк',
'материя', 'медведь', 'мелодия', 'мигание', 'молитва', 'моллюск', 'молодец', 'морская', 'мотылёк', 'мрачный',
'мужчины', 'мёртвые', 'надежда', 'наличие', 'невеста', 'неясный', 'новичок', 'носорог', 'обочина', 'образец',
'ожидать', 'озорник', 'озёрный', 'описать', 'отличие', 'падение', 'панцирь', 'перерыв', 'печенье', 'пешеход',
'планета', 'платить', 'пленный', 'плоский', 'повидло', 'поводок', 'поворот', 'поднять', 'подъезд', 'поехали',
'позиция', 'поймать', 'полоска', 'порошок', 'послали', 'привлёк', 'припасы', 'природа', 'причина', 'продать',
'продукт', 'прорубь', 'пулемёт', 'пустыня', 'пшеница', 'пёстрый', 'рабочие', 'равнины', 'размеры', 'расцвет',
'ребёнок', 'резинка', 'ремешок', 'реферат', 'решение', 'решётка', 'родился', 'розетка', 'ромашка', 'рубашка',
'рычание', 'самолёт', 'свадьба', 'сверчок', 'свисток', 'сегодня', 'сильный', 'системы', 'сказали', 'скребок',
'сладкий', 'слишком', 'сломать', 'служить', 'сметана', 'смеялся', 'создать', 'солёный', 'спасибо', 'средний',
'статный', 'стебель', 'стрелок', 'строгий', 'стройка', 'строчка', 'студент', 'студень', 'субъект', 'суффикс',
'считать', 'таблица', 'текучий', 'телёнок', 'теремок', 'тетрадь', 'товарищ', 'толстый', 'тяжёлый', 'украсть',
'условия', 'услышал', 'усмешка', 'учитель', 'учиться', 'философ', 'фуражка', 'холодец', 'хороший', 'храбрый',
'церковь', 'циркуль', 'человек', 'черника', 'широкий', 'шоколад', 'энергия'],
[151, 'алюминий', 'аргумент', 'больница', 'бренчать', 'вариться', 'введение', 'верхушка', 'весенний', 'веснушки',
'взорвали', 'винегрет', 'виноград', 'включить', 'водитель', 'возможно', 'возникли', 'воротник', 'выбирать',
'гармония', 'гвоздика', 'глубокий', 'говорить', 'говядина', 'голодать', 'горизонт', 'гребешок', 'грузовик',
'давление', 'движение', 'довольно', 'доказать', 'документ', 'достигли', 'животные', 'забивают', 'законный',
'застёжка', 'здоровье', 'землянка', 'изменить', 'инстинкт', 'инфекция', 'истинный', 'казалось', 'казённый',
'каракули', 'картинка', 'картошка', 'качаться', 'квартира', 'кислород', 'клубника', 'компания', 'комплекс',
'конфликт', 'короткий', 'крендель', 'кроватка', 'кукуруза', 'культура', 'лепесток', 'лимонный', 'малейший',
'мармелад', 'маслёнок', 'медицина', 'медпункт', 'местечко', 'методика', 'миллиард', 'молекулы', 'моллюски',
'нагрузка', 'надёжный', 'называть', 'напиться', 'наскоком', 'негромко', 'немножко', 'ненужный', 'обещание',
'обнимать', 'обращать', 'обходить', 'огромный', 'ожидание', 'оснастка', 'особенно', 'отделять', 'открытые',
'отходить', 'ощущение', 'пельмени', 'перчатка', 'петрушка', 'печатные', 'писатель', 'плавучий', 'подобный',
'пожарный', 'покрытые', 'полезный', 'ползунок', 'положить', 'получить', 'полюбить', 'понимают', 'поплавок',
'посетить', 'посыпать', 'почётный', 'практика', 'преграда', 'привычка', 'примерно', 'приправа', 'приятель',
'приятный', 'пробежка', 'проблема', 'продукты', 'радиатор', 'раздутый', 'различие', 'районный', 'растения',
'редакция', 'рисовать', 'сверлить', 'сельский', 'серийный', 'сказание', 'сложение', 'случайно', 'смотреть',
'снеговик', 'соседний', 'сравнить', 'страница', 'странный', 'студенты', 'сценарий', 'тащиться', 'типичный',
'торговля', 'футболка', 'холодный', 'царапины', 'черновик', 'черёмуха', 'читатель'],
[76, 'благодать', 'блондинки', 'вселенная', 'география', 'гладиолус', 'единичный', 'заполнены', 'заявление',
'известный', 'изумление', 'интонация', 'искусство', 'кондуктор', 'космонавт', 'моральный', 'наблюдать',
'наказание', 'насекомое', 'насекомые', 'население', 'настоящее', 'небоскрёб', 'невидимый', 'нуждаться',
'общежитие', 'объяснить', 'объяснять', 'одуванчик', 'отверстие', 'отдельный', 'платформа', 'поведение',
'повернуть', 'подводное', 'подождать', 'подставка', 'позволяет', 'помещение', 'понимание', 'поражение',
'последний', 'пословица', 'построить', 'правитель', 'президент', 'прижимать', 'прильнуть', 'принцесса',
'природный', 'провинция', 'проводник', 'программа', 'профессор', 'прохладно', 'разведчик', 'разделить',
'различные', 'разрезать', 'революция', 'результат', 'роскошный', 'свободный', 'служебный', 'случайный',
'смутиться', 'согласные', 'содержать', 'сотрудник', 'сохранить', 'страдание', 'ступенька', 'сцепление',
'телевизор', 'термометр', 'хозяйство', 'шотландка'],
[57, 'архитектор', 'безопасный', 'вентилятор', 'десятичная', 'дефицитные', 'деформация', 'достаточно', 'заклинание',
'зеркальный', 'знакомство', 'количество', 'косоглазие', 'крестьянин', 'любопытный', 'магазинный', 'математика',
'наводнение', 'насмешливо', 'настроение', 'начальство', 'неизбежный', 'неприятный', 'обеспечить', 'обнаружили',
'обречённый', 'обсуждение', 'объявление', 'оживлённая', 'отдалённый', 'палисадник', 'пластмасса', 'подбородок',
'поддержать', 'покупатель', 'популярный', 'потёртость', 'появляются', 'правильный', 'промежуток', 'пропитание',
'пропустить', 'равновесие', 'разделение', 'расстояние', 'растяжение', 'скольжение', 'сокращения', 'солдатский',
'стабильный', 'счастливая', 'тараторить', 'телефонный', 'территория', 'управление', 'упражнение', 'устаревшие',
'фиолетовый']]
| gpl-3.0 |
AnhellO/DAS_Sistemas | Ene-Jun-2021/delabra-salinas-brandon-emmanuel/Examen/Primer Parcial/Ejercicio 4/cajero.py | 1 | 2443 | from abc import ABCMeta, abstractstaticmethod
# Chain of Responsibility: each ATM handler dispenses the bills of its own
# denomination and forwards any remainder to its successor in the chain.
class CajeroHandler(metaclass=ABCMeta):
    """Abstract handler in the ATM bill-dispensing chain."""

    @abstractstaticmethod
    def next_succesor(next):
        """Set the next handler in the chain."""
        pass

    @abstractstaticmethod
    def handle(cantidad):
        """Dispense bills for *cantidad*, delegating what is left over."""
        pass


class Cajero50ConcreteHandler(CajeroHandler):
    """Dispenses $50 bills and passes any remainder down the chain."""

    def __init__(self):
        self._sucesor = None

    def next_succesor(self, sucesor):
        # Link the next handler in the chain.
        self._sucesor = sucesor

    def handle(self, cambio):
        if cambio >= 50:
            numero = cambio // 50
            resto = cambio % 50
            print(f"Dar {numero} $50")
            if resto != 0:
                self._sucesor.handle(resto)
        else:
            # Nothing to dispense here: delegate the whole amount.
            self._sucesor.handle(cambio)


class Cajero20ConcreteHandler(CajeroHandler):
    """Dispenses $20 bills and passes any remainder down the chain."""

    def __init__(self):
        self._sucesor = None

    def next_succesor(self, sucesor):
        self._sucesor = sucesor

    def handle(self, cambio):
        if cambio >= 20:
            numero = cambio // 20
            resto = cambio % 20
            print(f"Dar {numero} $20")
            if resto != 0:
                self._sucesor.handle(resto)
        else:
            self._sucesor.handle(cambio)


class Cajero10ConcreteHandler(CajeroHandler):
    """Dispenses $10 bills; last link, since amounts are multiples of 10."""

    def __init__(self):
        self._sucesor = None

    def next_succesor(self, sucesor):
        self._sucesor = sucesor

    def handle(self, cambio):
        if cambio >= 10:
            numero = cambio // 10
            resto = cambio % 10
            print(f"Dar {numero} $10")
            # With validated input (multiples of 10) resto is always 0;
            # guard the successor in case the chain is extended later.
            if resto != 0 and self._sucesor is not None:
                self._sucesor.handle(resto)
        elif self._sucesor is not None:
            self._sucesor.handle(cambio)


class CajeroATMchain:
    """Builds the $50 -> $20 -> $10 chain and exposes its entry point."""

    def __init__(self):
        self.chain1 = Cajero50ConcreteHandler()
        self.chain2 = Cajero20ConcreteHandler()
        self.chain3 = Cajero10ConcreteHandler()
        # Wire the chain: the $50 handler delegates to $20, which
        # delegates to $10.
        self.chain1.next_succesor(self.chain2)
        self.chain2.next_succesor(self.chain3)


if __name__ == '__main__':
    cajero = CajeroATMchain()
    ingreso = int(input("Cantidad la cual va a ingrasar: "))
    if ingreso < 10 or ingreso % 10 != 0:
        print("debes dar una cantidad multiplo de 10 ")
    else:
        # Only dispense when the amount is a valid multiple of 10.
        cajero.chain1.handle(ingreso)
amyvmiwei/chromium | tools/site_compare/utils/browser_iterate.py | 6 | 5863 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to use a browser to visit multiple URLs.
Prerequisites:
1. The command_line package from tools/site_compare
2. Either the IE BHO or Firefox extension (or both)
Installation:
1. Build the IE BHO, or call regsvr32 on a prebuilt binary
2. Add a file called "measurepageloadtimeextension@google.com" to
the default Firefox profile directory under extensions, containing
the path to the Firefox extension root
Invoke with the command line arguments as documented within
the command line.
"""
import command_line
import scrapers
import socket
import time
from drivers import windowing
# Constants
MAX_URL = 1024
PORT = 42492
def SetupIterationCommandLine(cmd):
  """Adds the necessary flags for iteration to a command.

  Declares the browser selection flags, the URL source flags (a single
  --url or a --list file with optional line-range selectors), and the
  timeout/window-size options.  Declaration order is preserved because the
  dependency and exclusion rules below refer to already-declared flags.

  Args:
    cmd: an object created by cmdline.AddCommand
  """
  cmd.AddArgument(
      ["-b", "--browser"], "Browser to use (ie, firefox, chrome)",
      type="string", required=True)
  cmd.AddArgument(
      ["-b1v", "--browserver"], "Version of browser", metaname="VERSION")
  cmd.AddArgument(
      ["-p", "--browserpath"], "Path to browser.",
      type="string", required=False)
  cmd.AddArgument(
      ["-u", "--url"], "URL to visit")
  cmd.AddArgument(
      ["-l", "--list"], "File containing list of URLs to visit", type="readfile")
  # Exactly one URL source: a single --url or a --list file.
  cmd.AddMutualExclusion(["--url", "--list"])
  cmd.AddArgument(
      ["-s", "--startline"], "First line of URL list", type="int")
  cmd.AddArgument(
      ["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
  cmd.AddArgument(
      ["-c", "--count"], "Number of lines of URL file to use", type="int")
  # Line-range selectors only make sense together with --list, and the
  # range end may be given either as --endline or as --count (not both).
  cmd.AddDependency("--startline", "--list")
  cmd.AddRequiredGroup(["--url", "--list"])
  cmd.AddDependency("--endline", "--list")
  cmd.AddDependency("--count", "--list")
  cmd.AddMutualExclusion(["--count", "--endline"])
  cmd.AddDependency("--count", "--startline")
  cmd.AddArgument(
      ["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
      "finish loading",
      type="int", default=300)
  cmd.AddArgument(
      ["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
def Iterate(command, iteration_func):
  """Iterates over a list of URLs, calling a function on each.

  Drives a browser (via the scraper selected by --browser/--browserver),
  sends each URL over a localhost TCP socket to the in-browser extension,
  and reports the newline-terminated response to iteration_func.  The
  browser is restarted after hangs, crashes and timeout responses.

  Args:
    command: the command line containing the iteration flags
    iteration_func: called for each URL as
        iteration_func(url, proc, wnd, response)
  """
  # Retrieve the browser scraper to use to invoke the browser
  scraper = scrapers.GetScraper((command["--browser"], command["--browserver"]))

  def AttachToBrowser(path, timeout):
    """Invoke the browser process and connect to the socket."""
    (proc, frame, wnd) = scraper.GetBrowser(path)

    if not wnd: raise ValueError("Could not invoke browser.")

    # Try to connect the socket. If it fails, wait and try
    # again. Do this for ten seconds
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)

    for attempt in xrange(10):
      try:
        s.connect(("localhost", PORT))
      except socket.error:
        time.sleep(1)
        continue
      break

    try:
      s.getpeername()
    except socket.error:
      raise ValueError("Could not connect to browser")

    if command["--size"]:
      # Resize and reposition the frame
      windowing.MoveAndSizeWindow(frame, (0, 0), command["--size"], wnd)

    s.settimeout(timeout)

    # Function attributes serve as mutable state shared with the closures.
    Iterate.proc = proc
    Iterate.wnd = wnd
    Iterate.s = s

  def DetachFromBrowser():
    """Close the socket and kill the process if necessary."""
    if Iterate.s:
      Iterate.s.close()
      Iterate.s = None

    if Iterate.proc:
      if not windowing.WaitForProcessExit(Iterate.proc, 0):
        try:
          windowing.EndProcess(Iterate.proc)
          windowing.WaitForProcessExit(Iterate.proc, 0)
        # NOTE(review): pywintypes is not imported in this module's visible
        # imports -- this handler would raise NameError if reached; confirm.
        except pywintypes.error:
          # Exception here most likely means the process died on its own
          pass
      Iterate.proc = None

  if command["--browserpath"]:
    browser = command["--browserpath"]
  else:
    browser = None

  # Read the URLs from the file
  if command["--url"]:
    url_list = [command["--url"]]
  else:
    startline = command["--startline"]
    if command["--count"]:
      endline = startline+command["--count"]
    else:
      endline = command["--endline"]

    url_list = []
    # NOTE(review): `file` shadows the builtin and the handle is never
    # closed; relies on interpreter cleanup.
    file = open(command["--list"], "r")
    for line in xrange(startline-1):
      file.readline()
    for line in xrange(endline-startline):
      url_list.append(file.readline().strip())

  timeout = command["--timeout"]

  # Loop through the URLs and send them through the socket
  Iterate.s = None
  Iterate.proc = None
  Iterate.wnd = None

  for url in url_list:
    # Invoke the browser if necessary
    if not Iterate.proc:
      AttachToBrowser(browser, timeout)
    # Send the URL and wait for a response
    Iterate.s.send(url + "\n")

    response = ""
    while (response.find("\n") < 0):
      try:
        recv = Iterate.s.recv(MAX_URL)
        response = response + recv

        # Workaround for an oddity: when Firefox closes
        # gracefully, somehow Python doesn't detect it.
        # (Telnet does)
        if not recv:
          raise socket.error

      except socket.timeout:
        response = url + ",hang\n"
        DetachFromBrowser()
      except socket.error:
        # If there was a socket error, it's probably a crash
        response = url + ",crash\n"
        DetachFromBrowser()

    # If we received a timeout response, restart the browser
    if response[-9:] == ",timeout\n":
      DetachFromBrowser()

    # Invoke the iteration function
    iteration_func(url, Iterate.proc, Iterate.wnd, response)

  # We're done
  DetachFromBrowser()
| bsd-3-clause |
etiennekruger/phantomjs-qt5 | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/api_implementation.py | 262 | 2804 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module is the central entity that determines which implementation of the
API is used.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import os
# This environment variable can be used to switch to a certain implementation
# of the Python API. Right now only 'python' and 'cpp' are valid values. Any
# other value will be ignored.
# Choose the API backend.  The environment variable may explicitly request
# the pure-Python implementation; when it is unset the default is 'python',
# and any other explicit value selects the C++ implementation instead.
_implementation_type = 'cpp'
if os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') == 'python':
  _implementation_type = 'python'
## Determine automatically which implementation to use.
#try:
# from google.protobuf.internal import cpp_message
# _implementation_type = 'cpp'
#except ImportError, e:
# _implementation_type = 'python'
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
  """Return the implementation type in use, either 'python' or 'cpp'."""
  return _implementation_type
| bsd-3-clause |
matthaywardwebdesign/rethinkdb | test/rql_test/connections/http_support/werkzeug/wrappers.py | 146 | 76379 | # -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
decoded into an unicode object if possible and if it makes sense.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
    """Lazily import and delegate to :func:`werkzeug.test.run_wsgi_app`.

    On first use the module-level name is rebound to the real function so
    that the test module is not imported unless required.  DO NOT USE!
    """
    global _run_wsgi_app
    from werkzeug.test import run_wsgi_app
    _run_wsgi_app = run_wsgi_app
    return run_wsgi_app(*args)
def _warn_if_string(iterable):
    """Warn when a response iterable is actually a plain string.

    A string technically works as a WSGI iterable, but the server would
    then send the body to the client one character at a time, which is
    almost never what was intended.
    """
    if not isinstance(iterable, string_types):
        return
    from warnings import warn
    warn(Warning('response iterable was set to a string. This appears '
                 'to work but means that the server will send the '
                 'data to the client char, by char. This is almost '
                 'never intended behavior, use response.data to assign '
                 'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
    """Yield each item of *iterable* as bytes.

    Text items are encoded with *charset*; byte items pass through
    unchanged.
    """
    for chunk in iterable:
        if not isinstance(chunk, text_type):
            yield chunk
        else:
            yield chunk.encode(charset)
class BaseRequest(object):
    """Very basic request object. This does not implement advanced stuff like
    entity tag parsing or cache controls. The request object is created with
    the WSGI environment as first argument and will add itself to the WSGI
    environment as ``'werkzeug.request'`` unless it's created with
    `populate_request` set to False.
    There are a couple of mixins available that add additional functionality
    to the request object, there is also a class called `Request` which
    subclasses `BaseRequest` and all the important mixins.
    It's a good idea to create a custom subclass of the :class:`BaseRequest`
    and add missing functionality either via mixins or direct implementation.
    Here an example for such subclasses::
        from werkzeug.wrappers import BaseRequest, ETagRequestMixin
        class Request(BaseRequest, ETagRequestMixin):
            pass
    Request objects are **read only**. As of 0.5 modifications are not
    allowed in any place. Unlike the lower level parsing functions the
    request object will use immutable objects everywhere possible.
    Per default the request object will assume all the text data is `utf-8`
    encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
    details about customizing the behavior.
    Per default the request object will be added to the WSGI
    environment as `werkzeug.request` to support the debugging system.
    If you don't want that, set `populate_request` to `False`.
    If `shallow` is `True` the environment is initialized as shallow
    object around the environ. Every operation that would modify the
    environ in any way (such as consuming form data) raises an exception
    unless the `shallow` attribute is explicitly set to `False`. This
    is useful for middlewares where you don't want to consume the form
    data by accident. A shallow request is not populated to the WSGI
    environment.
    .. versionchanged:: 0.5
       read-only mode was enforced by using immutables classes for all
       data.
    """
    #: the charset for the request, defaults to utf-8
    charset = 'utf-8'
    #: the error handling procedure for errors, defaults to 'replace'
    encoding_errors = 'replace'
    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_content_length = None
    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size = None
    #: the class to use for `args` and `form`. The default is an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. alternatively it makes sense to use an
    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
    #: which is the fastest but only remembers the last key. It is also
    #: possible to use mutable structures, but this is not recommended.
    #:
    #: .. versionadded:: 0.6
    parameter_storage_class = ImmutableMultiDict
    #: the type to be used for list values from the incoming WSGI environment.
    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
    #: (for example for :attr:`access_list`).
    #:
    #: .. versionadded:: 0.6
    list_storage_class = ImmutableList
    #: the type to be used for dict values from the incoming WSGI environment.
    #: By default an
    #: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
    #: (for example for :attr:`cookies`).
    #:
    #: .. versionadded:: 0.6
    dict_storage_class = ImmutableTypeConversionDict
    #: The form data parser that should be used. Can be replaced to customize
    #: the form data parsing.
    form_data_parser_class = FormDataParser
    #: Optionally a list of hosts that is trusted by this request. By default
    #: all hosts are trusted which means that whatever the client sends the
    #: host is will be accepted. This is the recommended setup as a webserver
    #: should manually be set up to not route invalid hosts to the application.
    #:
    #: .. versionadded:: 0.9
    trusted_hosts = None
    #: Indicates whether the data descriptor should be allowed to read and
    #: buffer up the input stream. By default it's enabled.
    #:
    #: .. versionadded:: 0.9
    disable_data_descriptor = False
    def __init__(self, environ, populate_request=True, shallow=False):
        self.environ = environ
        if populate_request and not shallow:
            self.environ['werkzeug.request'] = self
        self.shallow = shallow
    def __repr__(self):
        # make sure the __repr__ even works if the request was created
        # from an invalid WSGI environment. If we display the request
        # in a debug session we don't want the repr to blow up.
        args = []
        try:
            args.append("'%s'" % self.url)
            args.append('[%s]' % self.method)
        except Exception:
            args.append('(invalid WSGI environ)')
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join(args)
        )
    @property
    def url_charset(self):
        """The charset that is assumed for URLs. Defaults to the value
        of :attr:`charset`.
        .. versionadded:: 0.6
        """
        return self.charset
    @classmethod
    def from_values(cls, *args, **kwargs):
        """Create a new request object based on the values provided. If
        environ is given missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from a URL.
        Do not use this method for unittesting, there is a full featured client
        object (:class:`Client`) that allows to create multipart requests,
        support for cookies etc.
        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.
        .. versionchanged:: 0.5
           This method now accepts the same arguments as
           :class:`~werkzeug.test.EnvironBuilder`. Because of this the
           `environ` parameter is now called `environ_overrides`.
        :return: request object
        """
        from werkzeug.test import EnvironBuilder
        charset = kwargs.pop('charset', cls.charset)
        kwargs['charset'] = charset
        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            builder.close()
    @classmethod
    def application(cls, f):
        """Decorate a function as responder that accepts the request as first
        argument. This works like the :func:`responder` decorator but the
        function is passed the request object as first argument and the
        request object will be closed automatically::
            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')
        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both methods and standalone WSGI functions.
        def application(*args):
            request = cls(args[-2])
            with request:
                return f(*args[:-2] + (request,))(*args[-2:])
        return update_wrapper(application, f)
    def _get_file_stream(self, total_content_length, content_type, filename=None,
                         content_length=None):
        """Called to get a stream for the file upload.
        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.
        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.
        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        return default_stream_factory(total_content_length, content_type,
                                      filename, content_length)
    @property
    def want_form_data_parsed(self):
        """Returns True if the request method carries content. As of
        Werkzeug 0.9 this will be the case if a content type is transmitted.
        .. versionadded:: 0.8
        """
        return bool(self.environ.get('CONTENT_TYPE'))
    def make_form_data_parser(self):
        """Creates the form data parser. Instantiates the
        :attr:`form_data_parser_class` with some parameters.
        .. versionadded:: 0.8
        """
        return self.form_data_parser_class(self._get_file_stream,
                                           self.charset,
                                           self.encoding_errors,
                                           self.max_form_memory_size,
                                           self.max_content_length,
                                           self.parameter_storage_class)
    def _load_form_data(self):
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.
        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if 'form' in self.__dict__:
            return
        _assert_not_shallow(self)
        if self.want_form_data_parsed:
            content_type = self.environ.get('CONTENT_TYPE', '')
            content_length = get_content_length(self.environ)
            mimetype, options = parse_options_header(content_type)
            parser = self.make_form_data_parser()
            data = parser.parse(self._get_stream_for_parsing(),
                                mimetype, content_length, options)
        else:
            data = (self.stream, self.parameter_storage_class(),
                    self.parameter_storage_class())
        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d['stream'], d['form'], d['files'] = data
    def _get_stream_for_parsing(self):
        """This is the same as accessing :attr:`stream` with the difference
        that if it finds cached data from calling :meth:`get_data` first it
        will create a new stream out of the cached data.
        .. versionadded:: 0.9.3
        """
        cached_data = getattr(self, '_cached_data', None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream
    def close(self):
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.
        .. versionadded:: 0.9
        """
        files = self.__dict__.get('files')
        for key, value in iter_multi_items(files or ()):
            value.close()
    def __enter__(self):
        # Context-manager support; see :meth:`close`.
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # Close all associated resources when the with-block exits.
        self.close()
    @cached_property
    def stream(self):
        """The stream to read incoming data from. Unlike :attr:`input_stream`
        this stream is properly guarded that you can't accidentally read past
        the length of the input. Werkzeug will internally always refer to
        this stream to read data which makes it possible to wrap this
        object with a stream that does filtering.
        .. versionchanged:: 0.9
           This stream is now always available but might be consumed by the
           form parser later on. Previously the stream was only set if no
           parsing happened.
        """
        _assert_not_shallow(self)
        return get_input_stream(self.environ)
    input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
        'In general it\'s a bad idea to use this one because you can easily '
        'read past the boundary. Use the :attr:`stream` instead.')
    @cached_property
    def args(self):
        """The parsed URL parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
                          self.url_charset, errors=self.encoding_errors,
                          cls=self.parameter_storage_class)
    @cached_property
    def data(self):
        """The buffered request body as a bytestring (see :meth:`get_data`)."""
        if self.disable_data_descriptor:
            raise AttributeError('data descriptor is disabled')
        # XXX: this should eventually be deprecated.
        # We trigger form data parsing first which means that the descriptor
        # will not cache the data that would otherwise be .form or .files
        # data. This restores the behavior that was there in Werkzeug
        # before 0.9. New code should use :meth:`get_data` explicitly as
        # this will make behavior explicit.
        return self.get_data(parse_form_data=True)
    def get_data(self, cache=True, as_text=False, parse_form_data=False):
        """This reads the buffered incoming data from the client into one
        bytestring. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.
        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.
        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke form data parsing function
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.
        If `as_text` is set to `True` the return value will be a decoded
        unicode string.
        .. versionadded:: 0.9
        """
        rv = getattr(self, '_cached_data', None)
        if rv is None:
            if parse_form_data:
                self._load_form_data()
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            rv = rv.decode(self.charset, self.encoding_errors)
        return rv
    @cached_property
    def form(self):
        """The form parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        self._load_form_data()
        return self.form
    @cached_property
    def values(self):
        """Combined multi dict for :attr:`args` and :attr:`form`."""
        args = []
        for d in self.args, self.form:
            if not isinstance(d, MultiDict):
                d = MultiDict(d)
            args.append(d)
        return CombinedMultiDict(args)
    @cached_property
    def files(self):
        """:class:`~werkzeug.datastructures.MultiDict` object containing
        all uploaded files. Each key in :attr:`files` is the name from the
        ``<input type="file" name="">``. Each value in :attr:`files` is a
        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
        Note that :attr:`files` will only contain data if the request method was
        POST, PUT or PATCH and the ``<form>`` that posted to the request had
        ``enctype="multipart/form-data"``. It will be empty otherwise.
        See the :class:`~werkzeug.datastructures.MultiDict` /
        :class:`~werkzeug.datastructures.FileStorage` documentation for
        more details about the used data structure.
        """
        self._load_form_data()
        return self.files
    @cached_property
    def cookies(self):
        """Read only access to the retrieved cookie values as dictionary."""
        return parse_cookie(self.environ, self.charset,
                            self.encoding_errors,
                            cls=self.dict_storage_class)
    @cached_property
    def headers(self):
        """The headers from the WSGI environ as immutable
        :class:`~werkzeug.datastructures.EnvironHeaders`.
        """
        return EnvironHeaders(self.environ)
    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will always include a leading slash,
        even if the URL root is accessed.
        """
        raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
                                       self.charset, self.encoding_errors)
        return '/' + raw_path.lstrip('/')
    @cached_property
    def full_path(self):
        """Requested path as unicode, including the query string."""
        return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
    @cached_property
    def script_root(self):
        """The root path of the script without the trailing slash."""
        raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
                                       self.charset, self.encoding_errors)
        return raw_path.rstrip('/')
    @cached_property
    def url(self):
        """The reconstructed current URL as IRI."""
        return get_current_url(self.environ,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def base_url(self):
        """Like :attr:`url` but without the querystring"""
        return get_current_url(self.environ, strip_querystring=True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def url_root(self):
        """The full URL root (with hostname), this is the application
        root as IRI.
        """
        return get_current_url(self.environ, True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def host_url(self):
        """Just the host with scheme as IRI."""
        return get_current_url(self.environ, host_only=True,
                               trusted_hosts=self.trusted_hosts)
    @cached_property
    def host(self):
        """Just the host including the port if available."""
        return get_host(self.environ, trusted_hosts=self.trusted_hosts)
    query_string = environ_property('QUERY_STRING', '', read_only=True,
        load_func=wsgi_get_bytes, doc=
        '''The URL parameters as raw bytestring.''')
    method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
        '''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
    @cached_property
    def access_route(self):
        """If a forwarded header exists this is a list of all ip addresses
        from the client ip to the last proxy server.
        """
        if 'HTTP_X_FORWARDED_FOR' in self.environ:
            addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
            return self.list_storage_class([x.strip() for x in addr])
        elif 'REMOTE_ADDR' in self.environ:
            return self.list_storage_class([self.environ['REMOTE_ADDR']])
        return self.list_storage_class()
    @property
    def remote_addr(self):
        """The remote address of the client."""
        return self.environ.get('REMOTE_ADDR')
    remote_user = environ_property('REMOTE_USER', doc='''
        If the server supports user authentication, and the script is
        protected, this attribute contains the username the user has
        authenticated as.''')
    scheme = environ_property('wsgi.url_scheme', doc='''
        URL scheme (http or https).
        .. versionadded:: 0.7''')
    is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
                      .lower() == 'xmlhttprequest', doc='''
        True if the request was triggered via a JavaScript XMLHttpRequest.
        This only works with libraries that support the `X-Requested-With`
        header and set it to "XMLHttpRequest". Libraries that do that are
        prototype, jQuery and Mochikit and probably some more.''')
    is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
                         doc='`True` if the request is secure.')
    is_multithread = environ_property('wsgi.multithread', doc='''
        boolean that is `True` if the application is served by
        a multithreaded WSGI server.''')
    is_multiprocess = environ_property('wsgi.multiprocess', doc='''
        boolean that is `True` if the application is served by
        a WSGI server that spawns multiple processes.''')
    is_run_once = environ_property('wsgi.run_once', doc='''
        boolean that is `True` if the application will be executed only
        once in a process lifetime. This is the case for CGI for example,
        but it's not guaranteed that the execution only happens one time.''')
class BaseResponse(object):
"""Base response class. The most important fact about a response object
is that it's a regular WSGI application. It's initialized with a couple
of response parameters (headers, body, status code etc.) and will start a
valid WSGI response when called with the environ and start response
callable.
Because it's a WSGI application itself processing usually ends before the
actual response is sent to the server. This helps debugging systems
because they can catch all the exceptions before responses are started.
Here a small example WSGI application that takes advantage of the
response objects::
from werkzeug.wrappers import BaseResponse as Response
def index():
return Response('Index page')
def application(environ, start_response):
path = environ.get('PATH_INFO') or '/'
if path == '/':
response = index()
else:
response = Response('Not Found', status=404)
return response(environ, start_response)
Like :class:`BaseRequest` which object is lacking a lot of functionality
implemented in mixins. This gives you a better control about the actual
API of your response objects, so you can create subclasses and add custom
functionality. A full featured response object is available as
:class:`Response` which implements a couple of useful mixins.
To enforce a new type of already existing responses you can use the
:meth:`force_type` method. This is useful if you're working with different
subclasses of response objects and you want to post process them with a
know interface.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Response can be any kind of iterable or string. If it's a string it's
considered being an iterable with one item which is the string passed.
Headers can be a list of tuples or a
:class:`~werkzeug.datastructures.Headers` object.
Special note for `mimetype` and `content_type`: For most mime types
`mimetype` and `content_type` work the same, the difference affects
only 'text' mimetypes. If the mimetype passed with `mimetype` is a
mimetype starting with `text/`, the charset parameter of the response
object is appended to it. In contrast the `content_type` parameter is
always added as header unmodified.
.. versionchanged:: 0.5
the `direct_passthrough` parameter was added.
:param response: a string or response iterable.
:param status: a string with a status or an integer with the status code.
:param headers: a list of headers or a
:class:`~werkzeug.datastructures.Headers` object.
:param mimetype: the mimetype for the request. See notice above.
:param content_type: the content type for the request. See notice above.
:param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
called before iteration which makes it
possible to pass special iterators though
unchanged (see :func:`wrap_file` for more
details.)
"""
#: the charset of the response.
charset = 'utf-8'
#: the default status if none is provided.
default_status = 200
#: the default mimetype if none is provided.
default_mimetype = 'text/plain'
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
def __init__(self, response=None, status=None, headers=None,
mimetype=None, content_type=None, direct_passthrough=False):
if isinstance(headers, Headers):
self.headers = headers
elif not headers:
self.headers = Headers()
else:
self.headers = Headers(headers)
if content_type is None:
if mimetype is None and 'content-type' not in self.headers:
mimetype = self.default_mimetype
if mimetype is not None:
mimetype = get_content_type(mimetype, self.charset)
content_type = mimetype
if content_type is not None:
self.headers['Content-Type'] = content_type
if status is None:
status = self.default_status
if isinstance(status, integer_types):
self.status_code = status
else:
self.status = status
self.direct_passthrough = direct_passthrough
self._on_close = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (text_type, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
    def __repr__(self):
        # buffered bodies show their encoded byte length; otherwise report
        # whether the body is (or merely looks) streamed
        if self.is_sequence:
            body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
        else:
            body_info = self.is_streamed and 'streamed' or 'likely-streamed'
        return '<%s %s [%s]>' % (
            self.__class__.__name__,
            body_info,
            self.status
        )
    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.
        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::
            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)
            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)
        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.
        Keep in mind that this will modify response objects in place if
        possible!
        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError('cannot convert WSGI application into '
                                'response objects without an environ')
            # run the WSGI callable and build a plain BaseResponse from the
            # resulting (app_iter, status, headers) tuple
            response = BaseResponse(*_run_wsgi_app(response, environ))
        # in-place "downcast": swap the class of the existing instance
        response.__class__ = cls
        return response
    @classmethod
    def from_app(cls, app, environ, buffered=False):
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.
        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        # _run_wsgi_app yields (app_iter, status, headers), which maps
        # directly onto the constructor's first three positional arguments
        return cls(*_run_wsgi_app(app, environ, buffered))
    def _get_status_code(self):
        return self._status_code
    def _set_status_code(self, code):
        self._status_code = code
        # keep the string form in sync; codes missing from
        # HTTP_STATUS_CODES get "UNKNOWN" as their reason phrase
        try:
            self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
        except KeyError:
            self._status = '%d UNKNOWN' % code
    status_code = property(_get_status_code, _set_status_code,
                           doc='The HTTP Status code as number')
    del _get_status_code, _set_status_code
    def _get_status(self):
        return self._status
    def _set_status(self, value):
        self._status = to_native(value)
        # keep the numeric form in sync; a status string whose first word
        # is not an int becomes code 0 with the full text as reason phrase
        try:
            self._status_code = int(self._status.split(None, 1)[0])
        except ValueError:
            self._status_code = 0
            self._status = '0 %s' % self._status
    status = property(_get_status, _set_status, doc='The HTTP Status code')
    del _get_status, _set_status
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
# if an unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers['Content-Length'] = str(len(value))
data = property(get_data, set_data, doc='''
A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
should not be used and will eventually get deprecated.
''')
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.response)
    def _ensure_sequence(self, mutable=False):
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.
        Raises `RuntimeError` when conversion is not possible (direct
        passthrough mode or implicit conversion disabled).
        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)
            return
        # not buffered yet: refuse rather than silently consume the
        # iterable when the caller opted out of conversion
        if self.direct_passthrough:
            raise RuntimeError('Attempted implicit sequence conversion '
                               'but the response object is in direct '
                               'passthrough mode.')
        if not self.implicit_sequence_conversion:
            raise RuntimeError('The response object required the iterable '
                               'to be a sequence, but the implicit '
                               'conversion was disabled. Call '
                               'make_sequence() yourself.')
        self.make_sequence()
def make_sequence(self):
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, 'close', None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
    def iter_encoded(self):
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        """
        if __debug__:
            # development aid: flag a bare string used as response iterable
            _warn_if_string(self.response)
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        return _iter_encoded(self.response, self.charset)
    def set_cookie(self, key, value='', max_age=None, expires=None,
                   path='/', domain=None, secure=None, httponly=False):
        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
        object in the Python standard library but it accepts unicode data, too.
        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param secure: like the `Morsel` flag of the same name; forwarded
                       to `dump_cookie` unchanged.
        :param httponly: like the `Morsel` flag of the same name; forwarded
                         to `dump_cookie` unchanged.
        """
        self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
                         expires, path, domain, secure, httponly,
                         self.charset))
def delete_cookie(self, key, path='/', domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
a length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, 'close'):
self.response.close()
for func in self._on_close:
func()
    def __enter__(self):
        # context-manager support: entering yields the response itself
        return self
    def __exit__(self, exc_type, exc_value, tb):
        # close unconditionally, whether or not an exception occurred
        self.close()
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
# we explicitly set the length to a list of the *encoded* response
# iterator. Even if the implicit sequence conversion is disabled.
self.response = list(self.iter_encoded())
self.headers['Content-Length'] = str(sum(map(len, self.response)))
    def get_wsgi_headers(self, environ):
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.
        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.
        .. versionchanged:: 0.6
           Previously that function was called `fix_headers` and modified
           the response object in place. Also since 0.6, IRIs in location
           and content-location headers are handled properly.
           Also starting with 0.6, Werkzeug will attempt to set the content
           length if it is able to figure it out on its own. This is the
           case if all the strings in the response iterable are already
           encoded and the iterable is buffered.
        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        # work on a copy so the response's own headers stay untouched
        headers = Headers(self.headers)
        location = None
        content_location = None
        content_length = None
        status = self.status_code
        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used each response that gives us a tiny
        # speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == u'location':
                location = value
            elif ikey == u'content-location':
                content_location = value
            elif ikey == u'content-length':
                content_length = value
        # make sure the location header is an absolute URL
        if location is not None:
            old_location = location
            if isinstance(location, text_type):
                # Safe conversion is necessary here as we might redirect
                # to a broken URI scheme (for instance itms-services).
                location = iri_to_uri(location, safe_conversion=True)
            if self.autocorrect_location_header:
                current_url = get_current_url(environ, root_only=True)
                if isinstance(current_url, text_type):
                    current_url = iri_to_uri(current_url)
                location = url_join(current_url, location)
            # only write back when the value actually changed
            if location != old_location:
                headers['Location'] = location
        # make sure the content location is a URL
        if content_location is not None and \
           isinstance(content_location, text_type):
            headers['Content-Location'] = iri_to_uri(content_location)
        # remove entity headers and set content length to zero if needed.
        # Also update content_length accordingly so that the automatic
        # content length detection does not trigger in the following
        # code.
        if 100 <= status < 200 or status == 204:
            headers['Content-Length'] = content_length = u'0'
        elif status == 304:
            remove_entity_headers(headers)
        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of unicode strings in
        # the response. We however should not do that if we have a 304
        # response.
        if self.automatically_set_content_length and \
           self.is_sequence and content_length is None and status != 304:
            try:
                content_length = sum(len(to_bytes(x, 'ascii'))
                                     for x in self.response)
            except UnicodeError:
                # aha, something non-bytestringy in there, too bad, we
                # can't safely figure out the length of the response.
                pass
            else:
                headers['Content-Length'] = str(content_length)
        return headers
    def get_app_iter(self, environ):
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.
        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.
        .. versionadded:: 0.6
        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        # HEAD requests and bodyless status codes (1xx, 204, 304) yield
        # an empty body
        if environ['REQUEST_METHOD'] == 'HEAD' or \
           100 <= status < 200 or status in (204, 304):
            iterable = ()
        elif self.direct_passthrough:
            if __debug__:
                _warn_if_string(self.response)
            # passthrough returns the raw iterable with no encoding and
            # no ClosingIterator attached
            return self.response
        else:
            iterable = self.iter_encoded()
        return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
    def __call__(self, environ, start_response):
        """Process this response as WSGI application.
        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        :return: an application iterator
        """
        # standard WSGI protocol: announce status/headers, return the body
        app_iter, status, headers = self.get_wsgi_response(environ)
        start_response(status, headers)
        return app_iter
class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute
    to get all the HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).
    """
    # each property parses its header on first access and is then cached
    # on the instance by `cached_property`
    @cached_property
    def accept_mimetypes(self):
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)
    @cached_property
    def accept_charsets(self):
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
                                   CharsetAccept)
    @cached_property
    def accept_encodings(self):
        """List of encodings this client accepts. Encodings in a HTTP term
        are compression encodings such as gzip. For charsets have a look at
        :attr:`accept_charset`.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))
    @cached_property
    def accept_languages(self):
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.
        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
                                   LanguageAccept)
class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object with
    a WSGI environment available as :attr:`~BaseRequest.environ`. This not
    only provides access to etags but also to the cache control header.
    """
    # all descriptors below read a raw header from the WSGI environ and
    # return its parsed form, cached per instance by `cached_property`
    @cached_property
    def cache_control(self):
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.environ.get('HTTP_CACHE_CONTROL')
        return parse_cache_control_header(cache_control, None,
                                          RequestCacheControl)
    @cached_property
    def if_match(self):
        """An object containing all the etags in the `If-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_MATCH'))
    @cached_property
    def if_none_match(self):
        """An object containing all the etags in the `If-None-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))
    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))
    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))
    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))
    @cached_property
    def range(self):
        """The parsed `Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
    """Adds a `user_agent` attribute to the request object which contains the
    parsed user agent of the browser that triggered the request as a
    :class:`~werkzeug.useragents.UserAgent` object.
    """
    @cached_property
    def user_agent(self):
        """The current user agent."""
        # imported here rather than at module level — presumably to avoid
        # an import cycle with werkzeug.useragents; confirm before moving
        from werkzeug.useragents import UserAgent
        return UserAgent(self.environ)
class AuthorizationMixin(object):
    """Adds an :attr:`authorization` property that represents the parsed
    value of the `Authorization` header as
    :class:`~werkzeug.datastructures.Authorization` object.
    """
    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        # a missing header yields None from the environ lookup; the parse
        # helper receives it unchanged
        header = self.environ.get('HTTP_AUTHORIZATION')
        return parse_authorization_header(header)
class StreamOnlyMixin(object):
    """If mixed in before the request object this will change the behavior
    of it to disable handling of form parsing. This disables the
    :attr:`files`, :attr:`form` attributes and will just provide a
    :attr:`stream` attribute that however is always available.
    .. versionadded:: 0.9
    """
    # flags consumed by the request machinery defined elsewhere in this
    # module (not visible in this chunk)
    disable_data_descriptor = True
    want_form_data_parsed = False
class ETagResponseMixin(object):
    """Adds extra functionality to a response object for etag and cache
    handling. This mixin requires an object with at least a `headers`
    object that implements a dict like interface similar to
    :class:`~werkzeug.datastructures.Headers`.
    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mixin this method before the response base class. The default
    response class does not do that.
    """
    @property
    def cache_control(self):
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """
        def on_update(cache_control):
            # sync mutations of the returned object back into the headers;
            # an emptied object removes the header entirely
            if not cache_control and 'cache-control' in self.headers:
                del self.headers['cache-control']
            elif cache_control:
                self.headers['Cache-Control'] = cache_control.to_header()
        return parse_cache_control_header(self.headers.get('cache-control'),
                                          on_update,
                                          ResponseCacheControl)
    def make_conditional(self, request_or_environ):
        """Make the response conditional to the request. This method works
        best if an etag was defined for the response already. The `add_etag`
        method can be used to do that. If called without etag just the date
        header is set.
        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.
        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.
        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.
        :param request_or_environ: a request object or WSGI environment to be
                                   used to make the response conditional
                                   against.
        """
        environ = _get_environ(request_or_environ)
        if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
            # if the date is not in the headers, add it now. We however
            # will not override an already existing header. Unfortunately
            # this header will be overriden by many WSGI servers including
            # wsgiref.
            if 'date' not in self.headers:
                self.headers['Date'] = http_date()
            if 'content-length' not in self.headers:
                length = self.calculate_content_length()
                if length is not None:
                    # NOTE(review): stored as an int here while other code
                    # paths use str(length) — presumably the headers object
                    # coerces to text; confirm.
                    self.headers['Content-Length'] = length
            if not is_resource_modified(environ, self.headers.get('etag'), None,
                                        self.headers.get('last-modified')):
                self.status_code = 304
        return self
    def add_etag(self, overwrite=False, weak=False):
        """Add an etag for the current response if there is none yet."""
        if overwrite or 'etag' not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)
    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one."""
        self.headers['ETag'] = quote_etag(etag, weak)
    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``. If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get('ETag'))
    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready for
        pickling. This buffers the generator if there is one. This also
        sets the etag unless `no_etag` is set to `True`.
        """
        if not no_etag:
            self.add_etag()
        super(ETagResponseMixin, self).freeze()
    accept_ranges = header_property('Accept-Ranges', doc='''
        The `Accept-Ranges` header. Even though the name would indicate
        that multiple values are supported, it must be one string token only.
        The values ``'bytes'`` and ``'none'`` are common.
        .. versionadded:: 0.7''')
    def _get_content_range(self):
        def on_update(rng):
            # sync mutations of the range object back into the headers
            if not rng:
                del self.headers['content-range']
            else:
                self.headers['Content-Range'] = rng.to_header()
        rv = parse_content_range_header(self.headers.get('content-range'),
                                        on_update)
        # always provide a content range object to make the descriptor
        # more user friendly. It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv
    def _set_content_range(self, value):
        if not value:
            del self.headers['content-range']
        elif isinstance(value, string_types):
            self.headers['Content-Range'] = value
        else:
            self.headers['Content-Range'] = value.to_header()
    content_range = property(_get_content_range, _set_content_range, doc='''
        The `Content-Range` header as
        :class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set it will provide such an object for easier
        manipulation.
        .. versionadded:: 0.7''')
    del _get_content_range, _set_content_range
class ResponseStream(object):
    """A write-only, file-descriptor-like object used by
    :class:`ResponseStreamMixin` to represent the body of the stream.
    Written data is pushed directly onto the response iterable of the
    wrapped response object.
    """
    mode = 'wb+'
    def __init__(self, response):
        self.response = response
        self.closed = False
    def _check_open(self):
        # standard file-like behavior: operations after close() raise
        if self.closed:
            raise ValueError('I/O operation on closed file')
    def write(self, value):
        self._check_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)
    def writelines(self, seq):
        for item in seq:
            self.write(item)
    def close(self):
        self.closed = True
    def flush(self):
        self._check_open()
    def isatty(self):
        self._check_open()
        return False
    @property
    def encoding(self):
        return self.response.charset
class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses. Classes that inherit from
    this mixin will automatically get a :attr:`stream` property that provides
    a write-only interface to the response iterable.
    """
    @cached_property
    def stream(self):
        """The response iterable as write-only stream."""
        # the stream wraps *this* object; writes land in self.response
        return ResponseStream(self)
class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses. Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    .. versionadded:: 0.5
    """
    content_type = environ_property('CONTENT_TYPE', doc='''
        The Content-Type entity-header field indicates the media type of
        the entity-body sent to the recipient or, in the case of the HEAD
        method, the media type that would have been sent had the request
        been a GET.''')
    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return get_content_length(self.environ)
    content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
        The Content-Encoding entity-header field is used as a modifier to the
        media-type. When present, its value indicates what additional content
        codings have been applied to the entity-body, and thus what decoding
        mechanisms must be applied in order to obtain the media-type
        referenced by the Content-Type header field.
        .. versionadded:: 0.9''')
    content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
        The Content-MD5 entity-header field, as defined in RFC 1864, is an
        MD5 digest of the entity-body for the purpose of providing an
        end-to-end message integrity check (MIC) of the entity-body. (Note:
        a MIC is good for detecting accidental modification of the
        entity-body in transit, but is not proof against malicious attacks.)
        .. versionadded:: 0.9''')
    referrer = environ_property('HTTP_REFERER', doc='''
        The Referer[sic] request-header field allows the client to specify,
        for the server's benefit, the address (URI) of the resource from which
        the Request-URI was obtained (the "referrer", although the header
        field is misspelled).''')
    date = environ_property('HTTP_DATE', None, parse_date, doc='''
        The Date general-header field represents the date and time at which
        the message was originated, having the same semantics as orig-date
        in RFC 822.''')
    max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
        The Max-Forwards request-header field provides a mechanism with the
        TRACE and OPTIONS methods to limit the number of proxies or gateways
        that can forward the request to the next inbound server.''')
    def _parse_content_type(self):
        # parse the Content-Type header once into a (value, params) tuple
        # and memoize the result on the instance
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.environ.get('CONTENT_TYPE', ''))
    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.). For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0]
    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]
    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain. All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
def _get_mimetype(self):
ct = self.headers.get('content-type')
if ct:
return ct.split(';')[0].strip()
def _set_mimetype(self, value):
self.headers['Content-Type'] = get_content_type(value, self.charset)
def _get_mimetype_params(self):
def on_update(d):
self.headers['Content-Type'] = \
dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get('content-type', ''))[1]
return CallbackDict(d, on_update)
mimetype = property(_get_mimetype, _set_mimetype, doc='''
The mimetype (content type without charset etc.)''')
mimetype_params = property(_get_mimetype_params, doc='''
The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
''')
location = header_property('Location', doc='''
The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request
or identification of a new resource.''')
age = header_property('Age', None, parse_date, http_date, doc='''
The Age response-header field conveys the sender's estimate of the
amount of time since the response (or its revalidation) was
generated at the origin server.
Age values are non-negative decimal integers, representing time in
seconds.''')
content_type = header_property('Content-Type', doc='''
The Content-Type entity-header field indicates the media type of the
entity-body sent to the recipient or, in the case of the HEAD method,
the media type that would have been sent had the request been a GET.
''')
content_length = header_property('Content-Length', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body, in decimal number of OCTETs, sent to the recipient or,
in the case of the HEAD method, the size of the entity-body that would
have been sent had the request been a GET.''')
content_location = header_property('Content-Location', doc='''
The Content-Location entity-header field MAY be used to supply the
resource location for the entity enclosed in the message when that
entity is accessible from a location separate from the requested
resource's URI.''')
content_encoding = header_property('Content-Encoding', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.''')
content_md5 = header_property('Content-MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
''')
date = header_property('Date', None, parse_date, http_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
expires = header_property('Expires', None, parse_date, http_date, doc='''
The Expires entity-header field gives the date/time after which the
response is considered stale. A stale cache entry may not normally be
returned by a cache.''')
last_modified = header_property('Last-Modified', None, parse_date,
http_date, doc='''
The Last-Modified entity-header field indicates the date and time at
which the origin server believes the variant was last modified.''')
def _get_retry_after(self):
value = self.headers.get('retry-after')
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
def _set_retry_after(self, value):
if value is None:
if 'retry-after' in self.headers:
del self.headers['retry-after']
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers['Retry-After'] = value
retry_after = property(_get_retry_after, _set_retry_after, doc='''
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected
to be unavailable to the requesting client.
Time in seconds until expiration or date.''')
def _set_property(name, doc=None):
    """Build a property exposing header ``name`` as a parsed set value.

    The getter parses the current header with ``parse_set_header`` and
    registers a callback so in-place edits of the parsed object are written
    back to ``self.headers``.  The setter accepts a string (stored
    verbatim), any other truthy value (serialized via ``dump_header``), or
    a falsy value (which removes the header).
    """
    def getter(self):
        def writeback(header_set):
            # Keep the raw header in sync with the parsed set object.
            if header_set:
                self.headers[name] = header_set.to_header()
            elif name in self.headers:
                del self.headers[name]
        return parse_set_header(self.headers.get(name), writeback)

    def setter(self, value):
        if not value:
            # NOTE(review): raises KeyError when the header is absent —
            # this mirrors the original behavior; confirm callers rely on it.
            del self.headers[name]
        elif isinstance(value, string_types):
            self.headers[name] = value
        else:
            self.headers[name] = dump_header(value)

    return property(getter, setter, doc=doc)
vary = _set_property('Vary', doc='''
The Vary field value indicates the set of request-header fields that
fully determines, while the response is fresh, whether a cache is
permitted to use the response to reply to a subsequent request
without revalidation.''')
content_language = _set_property('Content-Language', doc='''
The Content-Language entity-header field describes the natural
language(s) of the intended audience for the enclosed entity. Note
that this might not be equivalent to all the languages used within
the entity-body.''')
allow = _set_property('Allow', doc='''
The Allow entity-header field lists the set of methods supported
by the resource identified by the Request-URI. The purpose of this
field is strictly to inform the recipient of valid methods
associated with the resource. An Allow header field MUST be
present in a 405 (Method Not Allowed) response.''')
del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
_set_retry_after
class WWWAuthenticateMixin(object):
    """Adds a :attr:`www_authenticate` property to a response object."""

    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""
        def _sync(auth):
            # Mirror mutations of the parsed object back into the headers.
            if auth:
                self.headers['WWW-Authenticate'] = auth.to_header()
            elif 'www-authenticate' in self.headers:
                del self.headers['www-authenticate']
        return parse_www_authenticate_header(
            self.headers.get('www-authenticate'), _sync)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
              UserAgentMixin, AuthorizationMixin,
              CommonRequestDescriptorsMixin):
    """The standard request class: :class:`BaseRequest` extended with every
    request-side mixin this module provides.

    Mixed-in behaviour:

    - accept header parsing (:class:`AcceptMixin`)
    - etag and cache control handling (:class:`ETagRequestMixin`)
    - user agent introspection (:class:`UserAgentMixin`)
    - http auth handling (:class:`AuthorizationMixin`)
    - common request headers (:class:`CommonRequestDescriptorsMixin`)
    """
class PlainRequest(StreamOnlyMixin, Request):
    """A :class:`Request` variant that leaves the input stream untouched:
    no special form parsing is performed (:class:`StreamOnlyMixin`).

    .. versionadded:: 0.9
    """
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
               CommonResponseDescriptorsMixin,
               WWWAuthenticateMixin):
    """The standard response class: :class:`BaseResponse` extended with
    every response-side mixin this module provides.

    Mixed-in behaviour:

    - etag and cache control handling (:class:`ETagResponseMixin`)
    - the `stream` property (:class:`ResponseStreamMixin`)
    - various HTTP descriptors (:class:`CommonResponseDescriptorsMixin`)
    - HTTP authentication support (:class:`WWWAuthenticateMixin`)
    """
| agpl-3.0 |
suncycheng/intellij-community | python/helpers/coveragepy/coverage/config.py | 39 | 12763 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Config file for coverage.py"""
import collections
import os
import re
import sys
from coverage.backward import configparser, iitems, string_class
from coverage.misc import contract, CoverageException, isolate_module
os = isolate_module(os)
class HandyConfigParser(configparser.RawConfigParser):
    """Our specialization of ConfigParser.

    Section names are looked up with ``section_prefix`` prepended (e.g.
    ``"coverage:"`` when reading from a shared file like setup.cfg), and
    values support ``$WORD`` / ``${WORD}`` environment-variable expansion.
    """

    def __init__(self, section_prefix):
        configparser.RawConfigParser.__init__(self)
        self.section_prefix = section_prefix

    def read(self, filename):
        """Read a file name as UTF-8 configuration data."""
        read_kwargs = {}
        if sys.version_info >= (3, 2):
            read_kwargs['encoding'] = "utf-8"
        return configparser.RawConfigParser.read(self, filename, **read_kwargs)

    def has_option(self, section, option):
        real_section = self.section_prefix + section
        return configparser.RawConfigParser.has_option(self, real_section, option)

    def has_section(self, section):
        real_section = self.section_prefix + section
        return configparser.RawConfigParser.has_section(self, real_section)

    def options(self, section):
        real_section = self.section_prefix + section
        return configparser.RawConfigParser.options(self, real_section)

    def get_section(self, section):
        """Get the contents of a section, as a dictionary."""
        return {opt: self.get(section, opt) for opt in self.options(section)}

    def get(self, section, *args, **kwargs):
        """Get a value, replacing environment variables also.

        The arguments are the same as `RawConfigParser.get`, but in the found
        value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
        environment variable ``WORD``.

        Returns the finished value.
        """
        real_section = self.section_prefix + section
        raw_value = configparser.RawConfigParser.get(
            self, real_section, *args, **kwargs)

        def expand_dollar(match):
            """Called for each $replacement."""
            # Only one of the groups will have matched, just get its text.
            word = next(g for g in match.groups() if g is not None)
            # "$$" escapes a literal dollar sign.
            return "$" if word == "$" else os.environ.get(word, '')

        dollar_pattern = r"""(?x)   # Use extended regex syntax
            \$(?:                   # A dollar sign, then
            (?P<v1>\w+) |           # a plain word,
            {(?P<v2>\w+)} |         # or a {-wrapped word,
            (?P<char>[$])           # or a dollar sign.
            )
            """
        return re.sub(dollar_pattern, expand_dollar, raw_value)

    def getlist(self, section, option):
        """Read a list of strings.

        The value of `section` and `option` is treated as a comma- and
        newline-separated list of strings.  Each value is stripped of
        whitespace.

        Returns the list of strings.
        """
        raw = self.get(section, option)
        return [piece.strip()
                for line in raw.split('\n')
                for piece in line.split(',')
                if piece.strip()]

    def getregexlist(self, section, option):
        """Read a list of full-line regexes.

        The value of `section` and `option` is treated as a newline-separated
        list of regexes.  Each value is stripped of whitespace.

        Returns the list of strings.
        """
        patterns = []
        for line in self.get(section, option).splitlines():
            line = line.strip()
            try:
                re.compile(line)
            except re.error as e:
                raise CoverageException(
                    "Invalid [%s].%s value %r: %s" % (section, option, line, e)
                )
            if line:
                patterns.append(line)
        return patterns
# The default line exclusion regexes.
# (A line matching any of these is never counted as a miss.)
DEFAULT_EXCLUDE = [
    r'(?i)#\s*pragma[:\s]?\s*no\s*cover',
]

# The default partial branch regexes, to be modified by the user.
DEFAULT_PARTIAL = [
    r'(?i)#\s*pragma[:\s]?\s*no\s*branch',
]

# The default partial branch regexes, based on Python semantics.
# These are any Python branching constructs that can't actually execute all
# their branches.
DEFAULT_PARTIAL_ALWAYS = [
    'while (True|1|False|0):',
    'if (True|1|False|0):',
]
class CoverageConfig(object):
    """Coverage.py configuration.

    The attributes of this class are the various settings that control the
    operation of coverage.py.  Values are layered: defaults from
    ``__init__``, then config files (``from_file``), then keyword
    arguments (``from_args``).
    """
    def __init__(self):
        """Initialize the configuration attributes to their defaults."""
        # Metadata about the config.
        self.attempted_config_files = []
        self.config_files = []

        # Defaults for [run]
        self.branch = False
        self.concurrency = None
        self.cover_pylib = False
        self.data_file = ".coverage"
        self.debug = []
        self.note = None
        self.parallel = False
        self.plugins = []
        self.source = None
        self.timid = False

        # Defaults for [report]
        self.exclude_list = DEFAULT_EXCLUDE[:]
        self.fail_under = 0
        self.ignore_errors = False
        self.include = None
        self.omit = None
        self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
        self.partial_list = DEFAULT_PARTIAL[:]
        self.precision = 0
        self.show_missing = False
        self.skip_covered = False

        # Defaults for [html]
        self.extra_css = None
        self.html_dir = "htmlcov"
        self.html_title = "Coverage report"

        # Defaults for [xml]
        self.xml_output = "coverage.xml"
        self.xml_package_depth = 99

        # Defaults for [paths]
        self.paths = {}

        # Options for plugins
        self.plugin_options = {}

    # Attribute names whose values must always be lists; a bare string
    # passed through from_args() is wrapped into a one-element list.
    MUST_BE_LIST = ["omit", "include", "debug", "plugins", "concurrency"]

    def from_args(self, **kwargs):
        """Read config values from `kwargs`."""
        for k, v in iitems(kwargs):
            if v is not None:
                if k in self.MUST_BE_LIST and isinstance(v, string_class):
                    v = [v]
                setattr(self, k, v)

    @contract(filename=str)
    def from_file(self, filename, section_prefix=""):
        """Read configuration from a .rc file.

        `filename` is a file name to read.  `section_prefix` is prepended
        to section names when the options live in a shared file (e.g.
        "coverage:" for setup.cfg).

        Returns True or False, whether the file could be read.
        """
        self.attempted_config_files.append(filename)

        cp = HandyConfigParser(section_prefix)
        try:
            files_read = cp.read(filename)
        except configparser.Error as err:
            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
        if not files_read:
            return False

        self.config_files.extend(files_read)

        try:
            for option_spec in self.CONFIG_FILE_OPTIONS:
                self._set_attr_from_config_option(cp, *option_spec)
        except ValueError as err:
            raise CoverageException("Couldn't read config file %s: %s" % (filename, err))

        # Check that there are no unrecognized options.
        all_options = collections.defaultdict(set)
        for option_spec in self.CONFIG_FILE_OPTIONS:
            section, option = option_spec[1].split(":")
            all_options[section].add(option)

        for section, options in iitems(all_options):
            if cp.has_section(section):
                for unknown in set(cp.options(section)) - options:
                    if section_prefix:
                        # Report the section under its prefixed (on-disk) name.
                        section = section_prefix + section
                    raise CoverageException(
                        "Unrecognized option '[%s] %s=' in config file %s" % (
                            section, unknown, filename
                        )
                    )

        # [paths] is special
        if cp.has_section('paths'):
            for option in cp.options('paths'):
                self.paths[option] = cp.getlist('paths', option)

        # plugins can have options
        for plugin in self.plugins:
            if cp.has_section(plugin):
                self.plugin_options[plugin] = cp.get_section(plugin)

        return True

    CONFIG_FILE_OPTIONS = [
        # These are *args for _set_attr_from_config_option:
        #   (attr, where, type_="")
        #
        #   attr is the attribute to set on the CoverageConfig object.
        #   where is the section:name to read from the configuration file.
        #   type_ is the optional type to apply, by using .getTYPE to read the
        #       configuration value from the file.

        # [run]
        ('branch', 'run:branch', 'boolean'),
        ('concurrency', 'run:concurrency', 'list'),
        ('cover_pylib', 'run:cover_pylib', 'boolean'),
        ('data_file', 'run:data_file'),
        ('debug', 'run:debug', 'list'),
        ('include', 'run:include', 'list'),
        ('note', 'run:note'),
        ('omit', 'run:omit', 'list'),
        ('parallel', 'run:parallel', 'boolean'),
        ('plugins', 'run:plugins', 'list'),
        ('source', 'run:source', 'list'),
        ('timid', 'run:timid', 'boolean'),

        # [report]
        ('exclude_list', 'report:exclude_lines', 'regexlist'),
        ('fail_under', 'report:fail_under', 'int'),
        ('ignore_errors', 'report:ignore_errors', 'boolean'),
        ('include', 'report:include', 'list'),
        ('omit', 'report:omit', 'list'),
        ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
        ('partial_list', 'report:partial_branches', 'regexlist'),
        ('precision', 'report:precision', 'int'),
        ('show_missing', 'report:show_missing', 'boolean'),
        ('skip_covered', 'report:skip_covered', 'boolean'),
        ('sort', 'report:sort'),

        # [html]
        ('extra_css', 'html:extra_css'),
        ('html_dir', 'html:directory'),
        ('html_title', 'html:title'),

        # [xml]
        ('xml_output', 'xml:output'),
        ('xml_package_depth', 'xml:package_depth', 'int'),
    ]

    def _set_attr_from_config_option(self, cp, attr, where, type_=''):
        """Set an attribute on self if it exists in the ConfigParser."""
        section, option = where.split(":")
        if cp.has_option(section, option):
            # e.g. type_='boolean' dispatches to cp.getboolean.
            method = getattr(cp, 'get' + type_)
            setattr(self, attr, method(section, option))

    def get_plugin_options(self, plugin):
        """Get a dictionary of options for the plugin named `plugin`."""
        return self.plugin_options.get(plugin, {})

    def set_option(self, option_name, value):
        """Set an option in the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        `value` is the new value for the option.
        """
        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                setattr(self, attr, value)
                return

        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            self.plugin_options.setdefault(plugin_name, {})[key] = value
            return

        # If we get here, we didn't find the option.
        raise CoverageException("No such option: %r" % option_name)

    def get_option(self, option_name):
        """Get an option from the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name.  For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        Returns the value of the option.
        """
        # Check all the hard-coded options.
        for option_spec in self.CONFIG_FILE_OPTIONS:
            attr, where = option_spec[:2]
            if where == option_name:
                return getattr(self, attr)

        # See if it's a plugin option.
        plugin_name, _, key = option_name.partition(":")
        if key and plugin_name in self.plugins:
            return self.plugin_options.get(plugin_name, {}).get(key)

        # If we get here, we didn't find the option.
        raise CoverageException("No such option: %r" % option_name)
| apache-2.0 |
CiscoSystems/networking-cisco | networking_cisco/plugins/ml2/drivers/cisco/apic/apic_topology.py | 2 | 12550 | # Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import eventlet
eventlet.monkey_patch()
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_cfg
from neutron.common import rpc
from neutron.common import utils as neutron_utils
from neutron.db import agents_db
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron import service
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_service import service as svc
from networking_cisco.plugins.ml2.drivers.cisco.apic import (
mechanism_apic as ma)
# Regexes matching the two known formats of ACI LLDP port descriptions,
# capturing (switch, module, port) as groups 1-3.
ACI_PORT_DESCR_FORMATS = [
    r'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+)\]',
    r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+)\]',
]

# Every N polls the host agent re-sends all links even when unchanged
# (see ApicTopologyAgent._check_for_new_peers).
AGENT_FORCE_UPDATE_COUNT = 100

BINARY_APIC_SERVICE_AGENT = 'neutron-cisco-apic-service-agent'
BINARY_APIC_HOST_AGENT = 'neutron-cisco-apic-host-agent'

# RPC topic shared by the service and host agents.
TOPIC_APIC_SERVICE = 'apic-service'

TYPE_APIC_SERVICE_AGENT = 'cisco-apic-service-agent'
TYPE_APIC_HOST_AGENT = 'cisco-apic-host-agent'

LOG = logging.getLogger(__name__)
class ApicTopologyService(manager.Manager):
    """RPC server side: maintains host-link topology reported by host agents.

    Receives ``update_link`` fanout casts from ApicTopologyAgent instances
    and mirrors the (host, interface) -> link mapping into the APIC via
    ``apic_manager``.
    """

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, host=None):
        if host is None:
            host = neutron_utils.get_hostname()
        super(ApicTopologyService, self).__init__(host=host)

        self.conf = cfg.CONF.ml2_cisco_apic
        self.conn = None
        # (host, interface) -> (host, interface, mac, switch, module, port)
        self.peers = {}
        self.invalid_peers = []
        self.dispatcher = None
        self.state = None
        self.state_agent = None
        self.topic = TOPIC_APIC_SERVICE
        self.apic_manager = ma.APICMechanismDriver.get_apic_manager(False)

    def init_host(self):
        """Start consuming topology casts on the fanout RPC topic."""
        LOG.info(_LI("APIC service agent starting ..."))
        self.state = {
            'binary': BINARY_APIC_SERVICE_AGENT,
            'host': self.host,
            'topic': self.topic,
            'configurations': {},
            'start_flag': True,
            'agent_type': TYPE_APIC_SERVICE_AGENT,
        }

        self.conn = rpc.create_connection(new=True)
        self.dispatcher = [self, agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(
            self.topic, self.dispatcher, fanout=True)
        self.conn.consume_in_threads()

    def after_start(self):
        LOG.info(_LI("APIC service agent started"))

    def report_send(self, context):
        """Report agent state to the Neutron server, if supported."""
        if not self.state_agent:
            return
        LOG.debug("APIC service agent: sending report state")
        try:
            self.state_agent.report_state(context, self.state)
            # 'start_flag' must only be sent with the first report.
            self.state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            # ignore it
            return
        except Exception:
            LOG.exception(_LE("APIC service agent: failed in reporting state"))

    @lockutils.synchronized('apic_service')
    def update_link(self, context,
                    host, interface, mac,
                    switch, module, port):
        """RPC endpoint: record, replace or delete one host link.

        A cast with switch == 0 is a delete marker (the notifier and host
        agent send 0/0/0 for removed links).  Serialized by the
        'apic_service' lock since casts may arrive concurrently.
        """
        LOG.debug("APIC service agent: received update_link: %s",
                  ", ".join(map(str,
                                [host, interface, mac, switch, module, port])))

        nlink = (host, interface, mac, switch, module, port)
        clink = self.peers.get((host, interface), None)

        if switch == 0:
            # this is a link delete, remove it
            if clink is not None:
                self.apic_manager.remove_hostlink(*clink)
                self.peers.pop((host, interface))
        else:
            if clink is None:
                # add new link to database
                self.apic_manager.add_hostlink(*nlink)
                self.peers[(host, interface)] = nlink
            elif clink != nlink:
                # delete old link and add new one (don't update in place)
                self.apic_manager.remove_hostlink(*clink)
                self.peers.pop((host, interface))
                self.apic_manager.add_hostlink(*nlink)
                self.peers[(host, interface)] = nlink
class ApicTopologyServiceNotifierApi(object):
    """Client-side RPC API used by host agents to broadcast link changes."""

    def __init__(self):
        rpc_target = oslo_messaging.Target(topic=TOPIC_APIC_SERVICE,
                                           version='1.0')
        self.client = rpc.get_client(rpc_target)

    def update_link(self, context, host, interface, mac, switch, module, port):
        """Fan out an ``update_link`` cast to all topology service agents."""
        ctxt = self.client.prepare(version='1.1', fanout=True)
        ctxt.cast(context, 'update_link', host=host, interface=interface,
                  mac=mac, switch=switch, module=module, port=port)

    def delete_link(self, context, host, interface):
        """Fan out a ``delete_link`` cast for a removed link.

        NOTE(review): no ``delete_link`` endpoint is visible on
        ApicTopologyService in this file (deletes arrive there as
        ``update_link`` with switch=0) — confirm a server-side handler
        exists before relying on this method.
        """
        ctxt = self.client.prepare(version='1.1', fanout=True)
        ctxt.cast(context, 'delete_link', host=host, interface=interface,
                  mac=None, switch=0, module=0, port=0)
class ApicTopologyAgent(manager.Manager):
    """Host-side agent: discovers uplink neighbors via LLDP and reports them.

    Periodically runs ``lldpctl`` over the configured uplink ports, parses
    ACI port descriptions, and casts add/update/delete link events to
    ApicTopologyService through ApicTopologyServiceNotifierApi.
    """
    def __init__(self, host=None):
        if host is None:
            host = neutron_utils.get_hostname()
        super(ApicTopologyAgent, self).__init__(host=host)

        self.conf = cfg.CONF.ml2_cisco_apic
        self.count_current = 0
        self.count_force_send = AGENT_FORCE_UPDATE_COUNT
        # interface name -> cached MAC address (filled lazily by _get_mac)
        self.interfaces = {}
        self.lldpcmd = None
        # interface name -> last reported peer tuple
        self.peers = {}
        # NOTE(review): under Python 3 ``map`` returns a one-shot iterator,
        # which would break the reuse in _get_peers(); this code appears to
        # target Python 2 — confirm before porting.
        self.port_desc_re = map(re.compile, ACI_PORT_DESCR_FORMATS)
        self.service_agent = ApicTopologyServiceNotifierApi()
        self.state = None
        self.state_agent = None
        self.topic = TOPIC_APIC_SERVICE
        self.uplink_ports = []
        self.invalid_peers = []

    def init_host(self):
        """Validate configured uplink ports and build the lldpctl command."""
        LOG.info(_LI("APIC host agent: agent starting on %s"), self.host)
        self.state = {
            'binary': BINARY_APIC_HOST_AGENT,
            'host': self.host,
            'topic': self.topic,
            'configurations': {},
            'start_flag': True,
            'agent_type': TYPE_APIC_HOST_AGENT,
        }

        self.uplink_ports = []
        for inf in self.conf.apic_host_uplink_ports:
            if ip_lib.device_exists(inf):
                self.uplink_ports.append(inf)
            else:
                # ignore unknown interfaces
                LOG.error(_LE("No such interface (ignored): %s"), inf)
        self.lldpcmd = ['lldpctl', '-f', 'keyvalue'] + self.uplink_ports

    def after_start(self):
        LOG.info(_LI("APIC host agent: started on %s"), self.host)

    @periodic_task.periodic_task
    def _check_for_new_peers(self, context):
        """Periodic task: diff current LLDP peers against the last report."""
        LOG.debug("APIC host agent: _check_for_new_peers")

        if not self.lldpcmd:
            return
        try:
            # Check if we must send update even if there is no change
            force_send = False
            self.count_current += 1
            if self.count_current >= self.count_force_send:
                force_send = True
                self.count_current = 0

            # Check for new peers
            new_peers = self._get_peers()
            new_peers = self._valid_peers(new_peers)

            # Make a copy of current interfaces
            curr_peers = {}
            for interface in self.peers:
                curr_peers[interface] = self.peers[interface]
            # Based curr -> new updates, add the new interfaces
            self.peers = {}
            for interface in new_peers:
                peer = new_peers[interface]
                self.peers[interface] = peer
                if (interface in curr_peers and
                        curr_peers[interface] != peer):
                    # Changed link: retract the old one first (switch=0).
                    self.service_agent.update_link(
                        context, peer[0], peer[1], None, 0, 0, 0)
                if (interface not in curr_peers or
                        curr_peers[interface] != peer or
                        force_send):
                    self.service_agent.update_link(context, *peer)
                if interface in curr_peers:
                    curr_peers.pop(interface)

            # Any interface still in curr_peers need to be deleted
            for peer in curr_peers.values():
                self.service_agent.update_link(
                    context, peer[0], peer[1], None, 0, 0, 0)

        except Exception:
            LOG.exception(_LE("APIC service agent: exception in LLDP parsing"))

    def _get_peers(self):
        """Run lldpctl and parse ACI port descriptions into peer tuples.

        Returns {interface: [(host, interface, mac, switch, module, port)]}.
        """
        peers = {}
        lldpkeys = utils.execute(self.lldpcmd, run_as_root=True)
        for line in lldpkeys.splitlines():
            if '=' not in line:
                continue
            fqkey, value = line.split('=', 1)
            # Key format: "lldp.<interface>.<sub.key>".
            lldp, interface, key = fqkey.split('.', 2)
            if key == 'port.descr':
                for regexp in self.port_desc_re:
                    match = regexp.match(value)
                    if match:
                        mac = self._get_mac(interface)
                        switch, module, port = match.group(1, 2, 3)
                        peer = (self.host, interface, mac,
                                switch, module, port)
                        if interface not in peers:
                            peers[interface] = []
                        peers[interface].append(peer)
        return peers

    def _valid_peers(self, peers):
        # Reduce the peers array to one valid peer per interface
        # NOTE:
        # There is a bug in lldpd daemon that it keeps reporting
        # old peers even after their updates have stopped
        # we keep track of that report remove them from peers
        valid_peers = {}
        invalid_peers = []
        for interface in peers:
            curr_peer = None
            for peer in peers[interface]:
                # Keep the first never-invalidated peer; everything else
                # on the interface is remembered as invalid.
                if peer in self.invalid_peers or curr_peer:
                    invalid_peers.append(peer)
                else:
                    curr_peer = peer
            if curr_peer is not None:
                valid_peers[interface] = curr_peer

        self.invalid_peers = invalid_peers
        return valid_peers

    def _get_mac(self, interface):
        """Return (and cache) the MAC address of ``interface``, or None."""
        if interface in self.interfaces:
            return self.interfaces[interface]
        try:
            mac = ip_lib.IPDevice(interface).link.address
            self.interfaces[interface] = mac
            return mac
        except Exception:
            # we can safely ignore it, it is only needed for debugging
            LOG.exception(
                _LE("APIC service agent: can not get MACaddr for %s"),
                interface)

    def report_send(self, context):
        """Report agent state to the Neutron server, if supported."""
        if not self.state_agent:
            return
        LOG.debug("APIC host agent: sending report state")
        try:
            self.state_agent.report_state(context, self.state)
            # 'start_flag' must only be sent with the first report.
            self.state.pop('start_flag', None)
        except AttributeError:
            # This means the server does not support report_state
            # ignore it
            return
        except Exception:
            LOG.exception(_LE("APIC host agent: failed in reporting state"))
def launch(binary, manager, topic=None):
    """Initialize oslo config/logging and run one agent service until exit.

    `binary` is the reported agent binary name, `manager` the dotted path
    of the manager class, and `topic` the optional RPC topic to serve.
    """
    cfg.CONF(project='neutron')
    common_cfg.init(sys.argv[1:])
    config.setup_logging()
    apic_conf = cfg.CONF.ml2_cisco_apic
    server = service.Service.create(
        binary=binary, manager=manager, topic=topic,
        report_interval=apic_conf.apic_agent_report_interval,
        periodic_interval=apic_conf.apic_agent_poll_interval)
    svc.launch(cfg.CONF, server).wait()
def service_main():
    """Console entry point for the topology service agent."""
    launch(
        BINARY_APIC_SERVICE_AGENT,
        'networking_cisco.plugins.ml2.drivers.'
        'cisco.apic.apic_topology.ApicTopologyService',
        TOPIC_APIC_SERVICE)
def agent_main():
    """Console entry point for the per-host topology agent (no RPC topic)."""
    launch(
        BINARY_APIC_HOST_AGENT,
        'networking_cisco.plugins.ml2.drivers.'
        'cisco.apic.apic_topology.ApicTopologyAgent')
| apache-2.0 |
deniclab/pyto_seg_slurm | segmentation/batch_mito_seg.py | 1 | 1273 | import os
import sys
sys.path.append('/n/denic_lab/Users/nweir/python_packages/')
import argparse
from pyto_segmenter import MitoSegment
# ---- Command-line interface (executed at import time) ----
parser = argparse.ArgumentParser(description = 'Segment mitochondria from \
images and return pickled objects.')
parser.add_argument('-d', '--img_dir', required = True,
                    help = 'directory containing images to segment.')
parser.add_argument('images', nargs = '*',
                    help = 'filenames for images, can be full path or just \
the image filename.')
args = parser.parse_args()
print(args)
img_dir = args.img_dir
# Strip the first two characters of each filename — presumably a "./"
# prefix added by the submitting shell glob; TODO confirm.
images = [img[2:] for img in args.images]
def main():
    """Canny-segment mitochondria in each input image and pickle the result.

    For every filename in the module-level ``images`` list, builds a
    MitoSegmenter with fixed thresholds, segments, drops objects touching
    the image border, and pickles the output under ``img_dir``/pickles.
    """
    for image_file in images:
        # Re-enter the image directory on every pass in case segmentation
        # changed the working directory.
        os.chdir(img_dir)
        print('SEGMENTING ' + image_file)
        segmenter = MitoSegment.MitoSegmenter(image_file, seg_method='canny',
                                              high_threshold=250,
                                              low_threshold=125,
                                              min_cutoff=2300)
        segmented = segmenter.segment()
        segmented.rm_border_objs()
        segmented.pickle(output_dir=img_dir + '/pickles')
        del segmented
# Standard script entry point guard.
if __name__ == '__main__':
    main()
| mit |
ziggear/shadowsocks | shadowsocks/shell.py | 270 | 12676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
# Extra-verbose logging level, below logging.DEBUG (10); registered with
# logging.addLevelName() in get_config().
VERBOSE_LEVEL = 5

# Module-level verbosity counter; set from the parsed config in get_config()
# and consulted by print_exception().
verbose = 0
def check_python():
    """Exit with status 1 unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2 and minor < 6:
        print('Python 2.6+ required')
        sys.exit(1)
    if major == 3 and minor < 3:
        print('Python 3.3+ required')
        sys.exit(1)
    if major not in (2, 3):
        print('Python version not supported')
        sys.exit(1)
def print_exception(e):
    """Log exception `e`; also print a traceback when verbose mode is on."""
    global verbose
    logging.error(e)
    if verbose > 0:
        # Lazy import: traceback is only needed on the verbose path.
        import traceback
        traceback.print_exc()
def print_shadowsocks():
    """Print the banner with the installed package version (blank if unknown)."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        # Not installed as a distribution (e.g. running from a checkout).
        version = ''
    print('Shadowsocks %s' % version)
def find_config():
    """Locate config.json in the working directory or next to the package.

    Returns the first existing candidate path, or None when neither exists.
    """
    candidates = [
        'config.json',
        os.path.join(os.path.dirname(__file__), '../', 'config.json'),
    ]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def check_config(config, is_local):
    """Validate a parsed configuration dict, normalizing it in place.

    Exits the process (status 1 or 2) on fatal problems, logs warnings for
    suspicious-but-legal settings, and finally verifies the cipher by
    calling encrypt.try_cipher().  `is_local` selects sslocal vs ssserver
    requirements.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return

    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)

    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)

    # Normalize port values to int (server_port may be a list of ports).
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])

    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])

    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warn('warning: server set to listen on %s:%s, are you sure?' %
                     (to_str(config['server']), config['server_port']))
    if (config.get('method', '') or '').lower() == 'table':
        logging.warn('warning: table is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if (config.get('method', '') or '').lower() == 'rc4':
        logging.warn('warning: RC4 is not safe; please use a safer cipher, '
                     'like AES-256-CFB')
    if config.get('timeout', 300) < 100:
        logging.warn('warning: your timeout %d seems too short' %
                     int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warn('warning: your timeout %d seems too long' %
                     int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        # Privilege dropping relies on POSIX setuid semantics.
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)

    # Raises if the method/password combination cannot build a cipher.
    encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
    """Build the effective configuration from CLI options and config file.

    Parses getopt options (the option set depends on `is_local`), merges
    them over the JSON config file (if any), fills in defaults, configures
    logging according to verbosity, validates via check_config(), and
    returns the config dict.  Exits the process on errors or for
    --help/--version.
    """
    global verbose

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        config_path = find_config()
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # First pass: -c overrides the auto-discovered config file path.
        for key, value in optlist:
            if key == '-c':
                config_path = value

        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(f.read().decode('utf8'))
                except ValueError as e:
                    # NOTE(review): ValueError.message exists only on
                    # Python 2 — confirm the Python 3 path.
                    logging.error('found an error in config.json: %s',
                                  e.message)
                    sys.exit(1)
        else:
            config = {}

        # Second pass: command-line options override file values.
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value).split(',')
            elif key in ('-h', '--help'):
                if is_local:
                    print_local_help()
                else:
                    print_server_help()
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)

    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)

    # Fill in defaults and normalize types.
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)

    # Reconfigure logging now that the final verbosity is known.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    check_config(config, is_local)

    return config
def print_help(is_local):
    """Print the usage text for whichever end of the tunnel is running.

    is_local: True for the sslocal client, False for the ssserver daemon.
    """
    helper = print_local_help if is_local else print_server_help
    helper()
def print_local_help():
    """Print usage information for the sslocal command to stdout."""
    usage = '''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.

You can supply configurations via either config file or command line arguments.

Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+

General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information

Online help: <https://github.com/shadowsocks/shadowsocks>
'''
    print(usage)
def print_server_help():
    """Print usage information for the ssserver command to stdout."""
    # Fix: "comma seperated" -> "comma separated" in the --forbidden-ip help.
    print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.

You can supply configurations via either config file or command line arguments.

Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma separated IP list forbidden to connect
  --manager-address ADDR optional server manager UDP address, see wiki

General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information

Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
    """Parse *data* as JSON, converting every unicode string back to str.

    The conversion is done by _decode_dict, which the json parser runs on
    each decoded JSON object via object_hook.
    """
    decoded = json.loads(data, object_hook=_decode_dict)
    return decoded
| apache-2.0 |
ak-67/ZeroNet | src/Test/TestWeb.py | 2 | 3893 | import urllib
import pytest
try:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.common.exceptions import NoSuchElementException
except:
pass
class WaitForPageLoad(object):
    """Context manager that blocks on exit until a page navigation finishes."""

    def __init__(self, browser):
        self.browser = browser

    def __enter__(self):
        # Snapshot the <html> element of the page we expect to leave.
        self.old_page = self.browser.find_element_by_tag_name('html')

    def __exit__(self, *args):
        # Navigation is complete once the snapshot element goes stale
        # (detached from the DOM); allow up to 20 seconds for it.
        waiter = WebDriverWait(self.browser, 20)
        waiter.until(staleness_of(self.old_page))
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.webtest
class TestWeb:
    """Selenium-backed integration tests for ZeroNet's web wrapper security."""

    def testFileSecurity(self, site_url):
        """Path-traversal attempts outside a site's media dir must be rejected."""
        assert "Forbidden" in urllib.urlopen("%s/media/./sites.json" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/media/../config.py" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url).read()
        assert "Forbidden" in urllib.urlopen("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url).read()

    def testHomepage(self, browser, site_url):
        """The site root should serve the ZeroHello homepage."""
        browser.get("%s" % site_url)
        assert browser.title == "ZeroHello - ZeroNet"

    def testLinkSecurity(self, browser, site_url):
        """Links inside the wrapped iframe must not leak the wrapper_nonce."""
        browser.get("%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url)
        assert browser.title == "ZeroHello - ZeroNet"
        assert browser.current_url == "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
        # Switch to inner frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        assert "wrapper_nonce" in browser.current_url
        browser.switch_to.default_content()

        # Clicking on links without target
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with WaitForPageLoad(browser):
            browser.find_element_by_id("link_to_current").click()
        assert "wrapper_nonce" not in browser.current_url  # The browser object back to default content
        assert "Forbidden" not in browser.page_source
        # Check if we have frame inside frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with pytest.raises(NoSuchElementException):
            assert not browser.find_element_by_id("inner-iframe")
        browser.switch_to.default_content()

        # Clicking on link with target=_top
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with WaitForPageLoad(browser):
            browser.find_element_by_id("link_to_top").click()
        assert "wrapper_nonce" not in browser.current_url  # The browser object back to default content
        assert "Forbidden" not in browser.page_source
        browser.switch_to.default_content()

        # Try to escape from inner_frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        assert "wrapper_nonce" in browser.current_url  # Make sure we are inside of the inner-iframe
        with WaitForPageLoad(browser):
            browser.execute_script("window.top.location = window.location")
        assert "wrapper_nonce" in browser.current_url  # We try to use nonce-ed html without iframe
        assert "Forbidden" in browser.page_source  # Only allow to use nonce once-time
        browser.switch_to.default_content()
| gpl-2.0 |
bobmyhill/burnman | tests/test_seismic.py | 7 | 2377 | from __future__ import absolute_import
import unittest
import os
import sys
sys.path.insert(1, os.path.abspath('..'))
import warnings
import burnman
from burnman import minerals
from burnman import seismic
from util import BurnManTest
class test_seismic(BurnManTest):
    """Regression tests pinning the 1-D seismic reference models to known values."""

    def test_internal_depth_list(self):
        # Every model's depth sampling must span the whole Earth
        # (0 .. 6371 km) with the recorded number of nodes.
        models = [burnman.seismic.PREM(), burnman.seismic.STW105(),
                  burnman.seismic.AK135(), burnman.seismic.IASP91()]
        ref_depth_lists = {'PREM': [0.0, 6371000.0, 94],
                           'STW105': [0.0, 6371000.0, 750],
                           'AK135': [0.0, 6371000.0, 145],
                           'IASP91': [0.0, 6371000.0, 152]}
        for model in models:
            dl = model.internal_depth_list()
            name = model.__class__.__name__
            stats = [min(dl), max(dl), len(dl)]
            # print model.__class__.__name__, stats
            self.assertArraysAlmostEqual(stats, ref_depth_lists[name])

    def test_evaluate(self):
        # Evaluate seismic velocities and density at 2000 km depth and compare
        # with previously recorded reference values for each model.
        models = [burnman.seismic.PREM(),
                  burnman.seismic.Fast(),
                  burnman.seismic.Slow(),
                  burnman.seismic.STW105(), burnman.seismic.AK135(),
                  burnman.seismic.IASP91()]
        ref = {
            'PREM': [12817.6924, 6932.8549000000003, 10010.358087588364, 5120.6290999999992],
            'Fast': [12795.360742611414, 6941.4225568201909, 9973.8053779446909, 5120.6290999999992],
            'Slow': [12795.360742611414, 6904.3291880013148, 10008.07546360332, 5120.6290999999992],
            'STW105': [12817.85012987013, 6932.9342065251822, 10010.486818122406, 5120.6749255622426],
            'AK135': [12798.468686868688, 6920.5212121212116, 9997.1320353488773, 5103.8969696969698],
            'IASP91': [12794.4, 6921.0, 9991.4805389391604], }
        for model in models:
            dl = model.internal_depth_list()
            name = model.__class__.__name__
            depth = 2000e3
            vars = ['v_p', 'v_s', 'v_phi', 'density']  # skip gravity
            if name == "IASP91":
                # IASP91 provides no density profile.
                vars = vars[0:-1]  # skip density
            result = model.evaluate(vars, [depth])
            result = list(result.T[0])
            # print "'%s': %s," % (name, result)
            self.assertArraysAlmostEqual(result, ref[name])
# Allow running this test module directly (outside of a test runner).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
patilsangram/erpnext | erpnext/assets/doctype/asset_maintenance_log/asset_maintenance_log.py | 21 | 2174 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
from frappe.utils import nowdate, getdate
from erpnext.assets.doctype.asset_maintenance.asset_maintenance import calculate_next_due_date
class AssetMaintenanceLog(Document):
    """Single maintenance-log entry linked to an Asset Maintenance Task."""

    def validate(self):
        # Flag overdue work first, then enforce status/completion-date
        # consistency.
        if getdate(self.due_date) < getdate(nowdate()):
            self.maintenance_status = "Overdue"

        if self.maintenance_status == "Completed" and not self.completion_date:
            frappe.throw(_("Please select Completion Date for Completed Asset Maintenance Log"))

        if self.maintenance_status != "Completed" and self.completion_date:
            frappe.throw(_("Please select Maintenance Status as Completed or remove Completion Date"))

    def on_submit(self):
        # Only terminal states may be submitted; submitting rolls the linked
        # task's schedule forward.
        if self.maintenance_status not in ['Completed', 'Cancelled']:
            frappe.throw(_("Maintenance Status has to be Cancelled or Completed to Submit"))
        self.update_maintenance_task()

    def update_maintenance_task(self):
        """Push this log's outcome back onto the linked maintenance task."""
        asset_maintenance_doc = frappe.get_doc('Asset Maintenance Task', self.task)
        if self.maintenance_status == "Completed":
            # Recompute the schedule only when the completion date changed.
            if asset_maintenance_doc.last_completion_date != self.completion_date:
                next_due_date = calculate_next_due_date(periodicity = self.periodicity, last_completion_date = self.completion_date)
                asset_maintenance_doc.last_completion_date = self.completion_date
                asset_maintenance_doc.next_due_date = next_due_date
                asset_maintenance_doc.maintenance_status = "Planned"
                asset_maintenance_doc.save()

        if self.maintenance_status == "Cancelled":
            asset_maintenance_doc.maintenance_status = "Cancelled"
            asset_maintenance_doc.save()
        # Re-save the parent Asset Maintenance so its aggregate state refreshes.
        asset_maintenance_doc = frappe.get_doc('Asset Maintenance', self.asset_maintenance)
        asset_maintenance_doc.save()
@frappe.whitelist()
def get_maintenance_tasks(doctype, txt, searchfield, start, page_len, filters):
    """Whitelisted link-query: maintenance task names of one Asset Maintenance.

    The signature follows frappe's standard search-query contract; only the
    'asset_maintenance' entry of *filters* is used here.
    """
    parent_filter = {'parent': filters.get("asset_maintenance")}
    return frappe.db.get_values('Asset Maintenance Task', parent_filter, 'maintenance_task')
| gpl-3.0 |
zerobatu/edx-platform | common/djangoapps/embargo/forms.py | 55 | 3038 | """
Defines forms for providing validation of embargo admin details.
"""
from django import forms
from django.utils.translation import ugettext as _
import ipaddr
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from embargo.models import IPFilter, RestrictedCourse
class RestrictedCourseForm(forms.ModelForm):
    """Django admin form for RestrictedCourse with friendly course-key checks.

    Stock admin behaviour would save keys for courses that do not exist and
    return a 500 on a malformed key; this form raises a readable validation
    error for both cases instead.
    """

    class Meta(object):  # pylint: disable=missing-docstring
        model = RestrictedCourse

    def clean_course_key(self):
        """Parse and verify the submitted course key.

        Returns:
            CourseKey: the validated key.

        Raises:
            forms.ValidationError: if the key is malformed or no such course
                exists in the modulestore.
        """
        raw_key = self.cleaned_data['course_key']
        error_msg = _('COURSE NOT FOUND. Please check that the course ID is valid.')

        try:
            course_key = CourseKey.from_string(raw_key)
        except InvalidKeyError:
            raise forms.ValidationError(error_msg)

        if not modulestore().has_course(course_key):
            raise forms.ValidationError(error_msg)

        return course_key
class IPFilterForm(forms.ModelForm):
    """Admin form for IPFilter that validates comma-separated IP lists."""

    class Meta(object):  # pylint: disable=missing-docstring
        model = IPFilter

    def _is_valid_ip(self, address):
        """Return True iff *address* parses as an IPv4/IPv6 address or network."""
        try:
            ipaddr.IPNetwork(address)
            return True
        except ValueError:
            return False

    def _valid_ip_addresses(self, addresses):
        """Validate a CSV string of IP addresses.

        Returns the string unchanged when every entry is valid; raises
        ValidationError listing the bad entries otherwise.
        """
        if addresses == '':
            return ''

        entries = [chunk.strip() for chunk in addresses.split(',')]
        error_addresses = [entry for entry in entries
                           if not self._is_valid_ip(entry)]

        if error_addresses:
            msg = 'Invalid IP Address(es): {0}'.format(error_addresses)
            msg += ' Please fix the error(s) and try again.'
            raise forms.ValidationError(msg)

        return addresses

    def clean_whitelist(self):
        """Validates the whitelist"""
        return self._valid_ip_addresses(self.cleaned_data["whitelist"])

    def clean_blacklist(self):
        """Validates the blacklist"""
        return self._valid_ip_addresses(self.cleaned_data["blacklist"])
| agpl-3.0 |
dev-elixir/hx_wt88047 | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Override distutils' build_ext so build output lands in the directories
    # the perf Makefile exports (PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the Makefile-provided build directory.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: base flags plus whatever the Makefile passes via CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build locations and pre-built static libraries exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')

# Read the extension source list, skipping blank lines and '#' comments.
# open() in a with-block replaces the py2-only file() builtin, so the handle
# is closed deterministically and the script also parses on Python 3.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [line.strip() for line in sources_file
                   if len(line.strip()) > 0 and line[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 extra_objects = [libtraceevent, liblk],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
michaelhotss/googletest | scripts/upload.py | 2511 | 51024 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.

  Args:
    prompt: Text shown to the user (the saved address is appended in []).

  Returns:
    The email address as a string.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      # 'with' guarantees the cache file is closed even if readline() raises;
      # the original leaked the handle on that path.
      with open(last_email_file_name, "r") as last_email_file:
        last_email = last_email_file.readline().strip("\n")
      prompt += " [%s]" % last_email
    except IOError:
      # Best effort: an unreadable cache just means no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      with open(last_email_file_name, "w") as last_email_file:
        last_email_file.write(email)
    except IOError:
      # Failing to persist the address for next time is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'verbosity' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if verbosity > 0:
    # write() keeps this working on both Python 2 and 3; the original used
    # the py2-only print statement. Output bytes are identical.
    sys.stdout.write("%s\n" % msg)
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1.

  Args:
    msg: The string to print before exiting.
  """
  # write() replaces the py2-only 'print >>sys.stderr' statement so the
  # function works identically on Python 2 and 3.
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # fp=None: there is no response body stream associated with this failure.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    # ClientLogin reports the kind of failure in the "Error" field
    # (e.g. "BadAuthentication", "CaptchaRequired").
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers is a mutable default argument shared across
    # instances; it is only read here (see _CreateRequest) but worth
    # confirming no caller mutates it.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # _GetOpener is supplied by the subclass (e.g. HttpRpcServer).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email: The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # ClientLogin replies with newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a machine-readable "Error" field; surface it as a
        # ClientLoginError so _Authenticate can branch on the reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only "BadAuthentication" re-prompts, every other
    # ClientLogin failure reason aborts with a message.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # Apply the timeout process-wide for the duration of the call, restoring
    # the previous value afterwards.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Session expired; re-authenticate and retry the request.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600. Fix: 0o600 is the
        # portable octal spelling; the legacy 0600 literal is a syntax
        # error on Python 3 (same value on Python 2.6+).
        fd = os.open(self.cookie_file, os.O_CREAT, 0o600)
        os.close(fd)
      # Always chmod the cookie file so it stays private to the user.
      os.chmod(self.cookie_file, 0o600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface: a single module-level OptionParser shared by the
# script entry point; options are grouped by concern (logging, review server,
# issue metadata, patch handling).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: Parsed command-line options (server, host, email, save_cookies).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      # Fix: pass the argument lazily to logging instead of pre-formatting
      # with %, so formatting only happens if the record is emitted.
      logging.info("Using debug user %s. Override with --email", email)
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (key, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % key,
        '',
        value,
    ])
  for (key, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
            (key, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, then a trailing CRLF via the final empty element.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed:
    return guessed
  # Unknown extension: fall back to the generic binary type.
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (On POSIX the argv list is executed directly, which avoids shell quoting.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line-by-line while also capturing it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after stdout is exhausted and the
  # process has exited; a command writing a very large amount to stderr could
  # fill the pipe and deadlock — confirm the VCS commands used stay quiet.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False):
  """Run *command* and return its stdout, aborting the program on failure.

  Args:
    command: Command to execute (list form, passed straight through).
    silent_ok: When False, empty output is treated as an error.
    universal_newlines: Forwarded to RunShellWithReturnCode.
    print_output: Forwarded to RunShellWithReturnCode.
  Returns:
    The command's stdout as a string.
  """
  output, returncode = RunShellWithReturnCode(command, print_output,
                                              universal_newlines)
  if returncode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.
    Args:
      options: Command line options.
    """
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.
    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      prompt = "Are you sure to continue?(y/N) "
      # Anything other than a literal "y" aborts the upload.
      answer = raw_input(prompt).strip()
      if answer != "y":
        ErrorExit("User aborted")
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.
    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.
    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files
  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""
    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      # NOTE(review): "type" shadows the builtin; used only as a label below.
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate("  --> %s" % response_body)
        sys.exit(1)
    # NOTE(review): list comprehension used only for its side effect of
    # building the {filename: file_id_str} map from (file_id_str, filename)
    # pairs; setdefault keeps the first id seen for a filename.
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id means the base content must not be sent.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""
  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV_START:REV_END"; group(3) is None for the
      # single-revision form.
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.
    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting sites get hand-built *checkout* URLs; anything else
        # falls through to the generic reassembled URL.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
  def GenerateDiff(self, args):
    """Run "svn diff" (optionally with -r) and return its output.
    Exits if the output contains no "Index:"/"Property changes on:" lines.
    """
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data
  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],
      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }
    def repl(m):
      # Collapse "$Keyword: value $" back to "$Keyword::      $" (fixed
      # form preserves width) or plain "$Keyword$".
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
  def GetUnknownFiles(self):
    """Return "svn status" lines for files not under version control."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result
  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n".  See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Synthesize an "svn status"-style string from set membership.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile for the return contract."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history".  See "svn st" for more information.  We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = mimetype and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = mimetype and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True
      if get_base:
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""
  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}
  def GenerateDiff(self, extra_args):
    """Return "git diff" output converted to an svn-style diff."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        # index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)
  def GetUnknownFiles(self):
    """Return files git reports as untracked (excluding ignored ones)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile; uses the hash recorded by
    GenerateDiff to fetch the base blob via "git show"."""
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""
  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the node part.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")
  def GenerateDiff(self, extra_args):
    """Return "hg diff --git" output converted to an svn-style diff."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files
  def GetBaseFile(self, filename):
    """See VersionControlSystem.GetBaseFile for the return contract."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
        silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
        silent_ok=True, universal_newlines=False)
    # Only binary images need new_content uploaded; drop it otherwise.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Split an svn-style diff into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuples (filename, text) where text is the portion of the
    diff output pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    started_name = None
    if line.startswith('Index:'):
      started_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # Modification paths use '/' between directories, but on Windows a
      # property-change path uses '\'; normalize so the same file does not
      # show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes but no modifications: start a fresh diff.
        started_name = candidate
    if not started_name:
      # Continuation of the current file's diff (or preamble junk before
      # the first Index line, which is dropped below).
      current_lines.append(line)
      continue
    if current_name and current_lines:
      pieces.append((current_name, ''.join(current_lines)))
    current_name = started_name
    current_lines = [line]
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.
  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # patch is (filename, diff_text); oversized diffs are skipped entirely.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # Expected server reply: first line "OK", second line the patch key.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate("  --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.
  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class.  Exit with an
  error if we can't figure it out.
  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.
  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().
  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a stable locale so tool output (svn/git/hg) is parseable.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): setting download_base to True inside a branch already
  # guarded by "options.download_base" being truthy looks like a no-op, and
  # the log message does not match the assignment -- TODO confirm intended
  # flag/flip here.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # NOTE(review): this check rejects any address whose domain has more
    # than one dot (e.g. user@sub.example.com) -- TODO confirm that is
    # intentional.
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server reply: first line message, second line patchset id, remaining
    # lines "<patch key> <filename>" pairs.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Entry point: run RealMain, exiting cleanly on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
if __name__ == "__main__":
  main()
| bsd-3-clause |
jamesylgan/szechuantech | python-scripts/six.py | 172 | 30888 | # Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.11.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
# Portable aliases for the types whose names changed between Python 2 and 3.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe: len() must fit the result of __len__ into Py_ssize_t, so an
        # OverflowError for 1 << 31 means we are on a 32-bit build.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access, then removes
    itself so later accesses hit a plain attribute."""
    def __init__(self, name):
        # Attribute name this descriptor is installed under.
        self.name = name
    def __get__(self, obj, tp):
        # Subclasses supply _resolve() to produce the real object.
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy proxy for a module that lives under a different name on
    Python 2 (*old*) vs Python 3 (*new*, defaulting to *name*)."""
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        # Import and return the module appropriate for this interpreter.
        return _import_module(self.mod)
    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache on the proxy so __getattr__ is not hit again for this name.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        # Mirror the subclass's docstring onto the module instance.
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazily provided attributes alongside the basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy proxy for an attribute that moved between modules (and possibly
    changed name) from Python 2 to Python 3."""
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # New attribute name defaults to the old one, then to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        # Import the owning module and pull the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        # Map of "<six>.<name>" -> module or MovedModule placeholder,
        # populated via _add_module.
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        """Register *mod* under one or more names relative to this six."""
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: only claim modules registered with us.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP 302 loader hook.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Placeholder: resolve to the real module for this interpreter.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
# Module-level importer instance keyed to this copy of six; presumably
# registered on sys.meta_path elsewhere in the module -- not visible here.
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    # The MovedAttribute/MovedModule descriptors are attached to this class
    # at module import time via setattr over the _moved_attributes table.
    __path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = [] # mark as package
    # Bind the already-registered lazy submodules so that attribute access
    # (six.moves.urllib.parse, ...) mirrors the Python 3 package layout.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        """Advertise only the urllib submodules, like the Python 3 package."""
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Register *move* as an attribute of the six.moves pseudo-module."""
    attribute_name = move.name
    setattr(_MovedItems, attribute_name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # Prefer the class-level attribute on _MovedItems; if that fails, fall
    # back to deleting from the ``moves`` module instance dict.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertion method."""
    bound = getattr(self, _assertCountEqual)
    return bound(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertion method."""
    bound = getattr(self, _assertRaisesRegex)
    return bound(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertion method."""
    bound = getattr(self, _assertRegex)
    return bound(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            # Invoked when the *user's* class body executes: discard the
            # dummy ``temporary_class`` base and build the real class from
            # the originally requested ``bases`` with the real metaclass.
            return meta(name, bases, d)
        @classmethod
        def __prepare__(cls, name, this_bases):
            # Delegate namespace preparation (PEP 3115) to the real
            # metaclass so __prepare__-provided namespaces keep working.
            return meta.__prepare__(name, bases)
    # The returned placeholder has ``metaclass`` as its metaclass, so
    # subclassing it triggers the replacement machinery above exactly once.
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def rebuild(cls):
        # Recreate ``cls`` through ``metaclass`` from a copy of its
        # namespace, stripped of the descriptors the original class
        # creation already installed (slot members, __dict__, __weakref__).
        namespace = dict(cls.__dict__)
        slots = namespace.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                namespace.pop(slot_name)
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return rebuild
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On Python 2 the text-returning __str__ becomes __unicode__, and
        # __str__ is redefined to return the UTF-8 encoded byte string.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit |
wevoice/wesub | apps/webdriver_testing/pages/site_pages/auth_page.py | 5 | 1209 | #!/usr/bin/env python
import time
from webdriver_testing.pages.site_pages import UnisubsPage
class AuthPage(UnisubsPage):
    """
    Unisubs page contains common web elements found across
    all Universal Subtitles pages. Every new page class is derived from
    UnisubsPage so that every child class can have access to common web
    elements and methods that pertain to those elements.
    """

    # CSS selectors for the login form fields and submit button.
    _SITE_LOGIN_USER_ID = "input#id_username"
    _SITE_LOGIN_USER_PW = "input#id_password"
    _SITE_LOGIN_SUBMIT = "form button[value=login]"
    # URL fragment of the login page, used to detect whether we are on it.
    _URL = 'auth/login'

    def login(self, username, passw):
        """Log in with the specified account type - default as a no-priv user.
        """
        curr_page = self.browser.current_url
        # Navigate to the login form only when we are not already on it and
        # not already authenticated.  ``not x == True`` treats any non-True
        # truthy return from logged_in() as "not logged in" --
        # NOTE(review): confirm logged_in() really returns a bool.
        if self._URL not in curr_page and not self.logged_in() == True:
            # _LOGIN and _USER_MENU are presumably selectors inherited from
            # UnisubsPage -- TODO confirm.  (Using ``assert`` here means the
            # check disappears under ``python -O``.)
            assert self.is_element_present(self._LOGIN)
            self.click_by_css(self._LOGIN)
        self.wait_for_element_present(self._SITE_LOGIN_USER_ID)
        self.type_by_css(self._SITE_LOGIN_USER_ID, username)
        self.type_by_css(self._SITE_LOGIN_USER_PW, passw)
        self.click_by_css(self._SITE_LOGIN_SUBMIT)
        # Presence of the user menu is taken as proof the login succeeded.
        self.wait_for_element_present(self._USER_MENU)
| agpl-3.0 |
McrRaspJam/Workshops | 004_Picamera_Pythonimaging/3-gradients/7_multi.py | 2 | 1144 |
#import libraries
import picamera
from time import sleep
from PIL import Image

#set up the camera
camera = picamera.PiCamera()
try:
    #capture a 720p frame
    #NOTE(review): an earlier comment claimed "maximum resolution (~5MP)",
    #but the mode configured below is 1280x720, not the sensor maximum.
    camera.resolution = (1280, 720)
    camera.framerate = 60
    #flip the preview/capture both ways (camera presumably mounted inverted)
    camera.vflip = True
    camera.hflip = True
    camera.start_preview()
    #allow camera to AWB (auto white balance) before capturing
    sleep(1)
    camera.capture('1_unedited.jpg')
    #load the image back into python
    photo = Image.open('1_unedited.jpg')
    pixels = photo.load()
    #apply a position-dependent colour gradient to each pixel
    #NOTE(review): per-pixel Python loops are slow at 1280x720 (~1M pixels);
    #this is intentional workshop code, not production-speed image handling.
    try:
        for i in range(photo.size[0]):
            for j in range(photo.size[1]):
                #separate the current pixel
                pixel = pixels[i,j]
                #separate the colours
                r = pixel[0]
                g = pixel[1]
                b = pixel[2]
                #Perform our edits
                #NOTE(review): i/6, j/2 and r/2 etc. assume Python 2 integer
                #division; under Python 3 these become floats and the final
                #pixel tuple would no longer be all-int -- confirm Python 2.
                r_temp = (i/6) / 200.0
                g_temp = 100 / 200.0
                b_temp = (j/2) / 200.0
                r_temp = int(r_temp * r)
                g_temp = int(g_temp * g)
                b_temp = int(b_temp * b)
                r = r_temp + r / 2
                g = g_temp + g / 2
                b = b_temp + b / 2
                #update the pixel
                pixel = (r, g, b)
                #place the pixel back in the image
                pixels[i,j] = pixel
    finally:
        #always write out whatever was processed, even on interruption
        photo.save('7_multi.jpg', quality=90)
finally:
    #release the camera hardware
    camera.close()
| cc0-1.0 |
angdraug/nova | nova/cmd/network.py | 15 | 2418 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Nova Network."""
import sys
import traceback
from oslo.config import cfg
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
import nova.db.api
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
def block_db_access():
    """Replace nova's DB API implementation with a stub that forbids access.

    nova-network must do all database work through the conductor service;
    installing this stub makes any direct DB call log the offending stack
    trace and raise ``DBNotAllowed`` instead of silently touching the DB.
    """
    class NoDB(object):
        def __getattr__(self, attr):
            # Any attribute chain (IMPL.foo.bar...) resolves back to this
            # object; the failure only happens when something is *called*.
            return self
        def __call__(self, *args, **kwargs):
            stacktrace = "".join(traceback.format_stack())
            LOG = logging.getLogger('nova.network')
            LOG.error(_('No db access allowed in nova-network: %s'),
                      stacktrace)
            raise exception.DBNotAllowed('nova-network')
    # Swap the stub in for the real DB implementation, module-wide.
    nova.db.api.IMPL = NoDB()
def main():
    """Entry point for nova-network: parse config, wire RPC, run the service."""
    config.parse_args(sys.argv)
    logging.setup("nova")
    utils.monkey_patch()
    objects.register_all()
    # Enable the guru-meditation error report (SIGUSR-triggered diagnostics).
    gmr.TextGuruMeditation.setup_autorun(version)

    if not CONF.conductor.use_local:
        # When a real conductor is in use, nova-network must never touch the
        # database directly; route all object DB calls through conductor RPC.
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()

    server = service.Service.create(binary='nova-network',
                                    topic=CONF.network_topic,
                                    db_allowed=CONF.conductor.use_local)
    service.serve(server)
    service.wait()
| apache-2.0 |
gioman/QGIS | python/plugins/processing/algs/gdal/GridInvDist.py | 1 | 5580 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GridInvDist.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class GridInvDist(GdalAlgorithm):
    """Interpolate a point vector layer to a raster with GDAL's ``gdal_grid``
    utility, using the inverse-distance-to-a-power algorithm."""

    # Parameter/output identifiers.  Kept spelled as-is (including the
    # historical 'SMOTHING' typo in the identifier) for backward
    # compatibility with saved models/scripts that reference them.
    INPUT = 'INPUT'
    Z_FIELD = 'Z_FIELD'
    POWER = 'POWER'
    SMOTHING = 'SMOTHING'
    RADIUS_1 = 'RADIUS_1'
    RADIUS_2 = 'RADIUS_2'
    MAX_POINTS = 'MAX_POINTS'
    MIN_POINTS = 'MIN_POINTS'
    ANGLE = 'ANGLE'
    NODATA = 'NODATA'
    OUTPUT = 'OUTPUT'
    RTYPE = 'RTYPE'
    # Raster data types offered to the user; index is passed as RTYPE.
    TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'grid.png'))

    def name(self):
        return 'gridinvdist'

    def displayName(self):
        return self.tr('Grid (Inverse distance to a power)')

    def group(self):
        return self.tr('Raster analysis')

    def defineCharacteristics(self):
        """Declare the algorithm's input parameters and its raster output."""
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [dataobjects.TYPE_VECTOR_POINT]))
        self.addParameter(ParameterTableField(self.Z_FIELD,
                                              self.tr('Z field'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER, True))
        self.addParameter(ParameterNumber(self.POWER,
                                          self.tr('Power'), 0, 100.0, 2.0))
        self.addParameter(ParameterNumber(self.SMOTHING,
                                          self.tr('Smoothing'), 0.0, 99999999.999999, 0.0))
        self.addParameter(ParameterNumber(self.RADIUS_1,
                                          self.tr('Radius 1'), 0.0, 99999999.999999, 0.0))
        self.addParameter(ParameterNumber(self.RADIUS_2,
                                          self.tr('Radius 2'), 0.0, 99999999.999999, 0.0))
        self.addParameter(ParameterNumber(self.MAX_POINTS,
                                          self.tr('Max points'), 0.0, 99999999.999999, 0.0))
        self.addParameter(ParameterNumber(self.MIN_POINTS,
                                          self.tr('Min points'), 0.0, 99999999.999999, 0.0))
        self.addParameter(ParameterNumber(self.ANGLE,
                                          self.tr('Angle'), 0.0, 359.0, 0.0))
        self.addParameter(ParameterNumber(self.NODATA,
                                          self.tr('Nodata'), -99999999.999999, 99999999.999999, 0.0))
        self.addParameter(ParameterSelection(self.RTYPE,
                                             self.tr('Output raster type'), self.TYPE, 5))
        self.addOutput(OutputRaster(self.OUTPUT, self.tr('Interpolated IDW')))

    def getConsoleCommands(self):
        """Build the gdal_grid command line for the current parameter values."""
        arguments = ['-l']
        arguments.append(
            os.path.basename(os.path.splitext(
                str(self.getParameterValue(self.INPUT)))[0]))

        fieldName = self.getParameterValue(self.Z_FIELD)
        if fieldName is not None and fieldName != '':
            arguments.append('-zfield')
            arguments.append(fieldName)

        # gdal_grid's invdist algorithm option is spelled 'smoothing';
        # the previous ':smothing=' spelling is not recognised by GDAL,
        # so the user's smoothing value never took effect.
        params = 'invdist'
        params += ':power=%s' % self.getParameterValue(self.POWER)
        params += ':smoothing=%s' % self.getParameterValue(self.SMOTHING)
        params += ':radius1=%s' % self.getParameterValue(self.RADIUS_1)
        params += ':radius2=%s' % self.getParameterValue(self.RADIUS_2)
        params += ':angle=%s' % self.getParameterValue(self.ANGLE)
        params += ':max_points=%s' % self.getParameterValue(self.MAX_POINTS)
        params += ':min_points=%s' % self.getParameterValue(self.MIN_POINTS)
        params += ':nodata=%s' % self.getParameterValue(self.NODATA)

        arguments.append('-a')
        arguments.append(params)
        arguments.append('-ot')
        arguments.append(self.TYPE[self.getParameterValue(self.RTYPE)])
        arguments.append(str(self.getParameterValue(self.INPUT)))
        arguments.append(str(self.getOutputValue(self.OUTPUT)))
        return ['gdal_grid', GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
JerryXia/fastgoagent | goagent/server/uploader/google/appengine/tools/adaptive_thread_pool.py | 13 | 14123 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides thread-pool-like functionality for workers accessing App Engine.
The pool adapts to slow or timing out requests by reducing the number of
active workers, or increasing the number when requests latency reduces.
"""
import logging
import Queue
import sys
import threading
import time
import traceback
from google.appengine.tools.requeue import ReQueue
logger = logging.getLogger('google.appengine.tools.adaptive_thread_pool')
_THREAD_SHOULD_EXIT = '_THREAD_SHOULD_EXIT'
INITIAL_BACKOFF = 1.0
BACKOFF_FACTOR = 2.0
class Error(Exception):
  """Base-class for exceptions in this module.

  All thread-pool exceptions derive from this, so callers can catch the
  whole family with a single ``except Error``.
  """
class WorkItemError(Error):
  """Error while processing a WorkItem."""
class RetryException(Error):
  """A non-fatal exception that indicates that a work item should be retried."""
def InterruptibleSleep(sleep_time):
  """Puts thread to sleep, checking this threads exit_flag four times a second.

  Sleeps in slices of at most 0.25s so that a request to exit (the calling
  thread's ``exit_flag`` attribute being set) is noticed promptly instead of
  after the full sleep.

  Args:
    sleep_time: Time to sleep, in seconds.
  """
  slept = 0.0
  # Tolerance for float accumulation error in the loop condition.
  epsilon = .0001
  # threading.currentThread() is a deprecated alias removed in Python 3.13;
  # current_thread() has existed since Python 2.6 and is identical.
  thread = threading.current_thread()
  while slept < sleep_time - epsilon:
    remaining = sleep_time - slept
    this_sleep_time = min(remaining, 0.25)
    time.sleep(this_sleep_time)
    slept += this_sleep_time
    if thread.exit_flag:
      return
class WorkerThread(threading.Thread):
  """A WorkerThread to execute WorkItems.

  Each worker repeatedly pulls a WorkItem off the pool's requeue, runs it,
  and reports a (status, instruction) pair back to the ThreadGate so the
  pool can adapt its concurrency.

  Attributes:
    exit_flag: A boolean indicating whether this thread should stop
      its work and exit.
  """
  def __init__(self, thread_pool, thread_gate, name=None):
    """Initialize a WorkerThread instance.
    Args:
      thread_pool: An AdaptiveThreadPool instance.
      thread_gate: A ThreadGate instance.
      name: A name for this WorkerThread.
    """
    threading.Thread.__init__(self)
    # Daemonize so a stuck worker cannot keep the process alive at exit.
    self.setDaemon(True)
    self.exit_flag = False
    # Only the first error/traceback observed by this thread is retained.
    self.__error = None
    self.__traceback = None
    self.__thread_pool = thread_pool
    self.__work_queue = thread_pool.requeue
    self.__thread_gate = thread_gate
    if not name:
      self.__name = 'Anonymous_' + self.__class__.__name__
    else:
      self.__name = name
  def run(self):
    """Perform the work of the thread."""
    logger.debug('[%s] %s: started', self.getName(), self.__class__.__name__)
    try:
      self.WorkOnItems()
    # Bare except is deliberate: record whatever escaped (via SetError) so
    # the pool can surface it through CheckError, then exit the thread.
    except:
      self.SetError()
    logger.debug('[%s] %s: exiting', self.getName(), self.__class__.__name__)
  def SetError(self):
    """Sets the error and traceback information for this thread.
    This must be called from an exception handler.
    """
    # Keep only the first error; later ones are logged but not stored.
    if not self.__error:
      exc_info = sys.exc_info()
      self.__error = exc_info[1]
      self.__traceback = exc_info[2]
      logger.exception('[%s] %s:', self.getName(), self.__class__.__name__)
  def WorkOnItems(self):
    """Perform the work of a WorkerThread."""
    while not self.exit_flag:
      item = None
      # Block here until the ThreadGate enables this worker.
      self.__thread_gate.StartWork()
      try:
        # Pessimistic defaults: anything unexpected counts as a failure
        # and tells the gate to reduce concurrency.
        status, instruction = WorkItem.FAILURE, ThreadGate.DECREASE
        try:
          if self.exit_flag:
            instruction = ThreadGate.HOLD
            break
          try:
            # Poll with a timeout so exit_flag is rechecked every second.
            item = self.__work_queue.get(block=True, timeout=1.0)
          except Queue.Empty:
            instruction = ThreadGate.HOLD
            continue
          if item == _THREAD_SHOULD_EXIT or self.exit_flag:
            status, instruction = WorkItem.SUCCESS, ThreadGate.HOLD
            break
          logger.debug('[%s] Got work item %s', self.getName(), item)
          # The item itself decides both its status and the gate instruction.
          status, instruction = item.PerformWork(self.__thread_pool)
        except RetryException:
          status, instruction = WorkItem.RETRY, ThreadGate.HOLD
        except:
          self.SetError()
          raise
      finally:
        try:
          if item:
            if status == WorkItem.SUCCESS:
              self.__work_queue.task_done()
            elif status == WorkItem.RETRY:
              # Put the item back for another attempt without blocking;
              # a full queue here is fatal for this worker.
              try:
                self.__work_queue.reput(item, block=False)
              except Queue.Full:
                logger.error('[%s] Failed to reput work item.', self.getName())
                raise Error('Failed to reput work item')
            else:
              # FAILURE: record the item's error (or synthesize one) and
              # re-raise so run() terminates this worker.
              if not self.__error:
                if item.error:
                  self.__error = item.error
                  self.__traceback = item.traceback
                else:
                  self.__error = WorkItemError(
                      'Fatal error while processing %s' % item)
              raise self.__error
        finally:
          # Always report back to the gate, even on the error paths above.
          self.__thread_gate.FinishWork(instruction=instruction)
  def CheckError(self):
    """If an error is present, then log it."""
    if self.__error:
      logger.error('Error in %s: %s', self.getName(), self.__error)
      if self.__traceback:
        logger.debug('%s', ''.join(traceback.format_exception(
            self.__error.__class__,
            self.__error,
            self.__traceback)))
  def __str__(self):
    return self.__name
class AdaptiveThreadPool(object):
  """A thread pool which processes WorkItems from a queue.

  Attributes:
    requeue: The requeue instance which holds work items for this
      thread pool.
  """
  def __init__(self,
               num_threads,
               queue_size=None,
               base_thread_name=None,
               worker_thread_factory=WorkerThread,
               queue_factory=Queue.Queue):
    """Initialize an AdaptiveThreadPool.

    An adaptive thread pool executes WorkItems using a number of
    WorkerThreads.  WorkItems represent items of work that may
    succeed, soft fail, or hard fail. In addition, a completed work
    item can signal this AdaptiveThreadPool to enable more or fewer
    threads.  Initially one thread is active.  Soft failures are
    requeued to be retried.  Hard failures cause this
    AdaptiveThreadPool to shut down entirely.  See the WorkItem class
    for more details.

    Args:
      num_threads: The number of threads to use.
      queue_size: The size of the work item queue to use.
      base_thread_name: A string from which worker thread names are derived.
      worker_thread_factory: A factory which produces WorkerThreads.
      queue_factory: Used for dependency injection.
    """
    if queue_size is None:
      queue_size = num_threads
    self.requeue = ReQueue(queue_size, queue_factory=queue_factory)
    self.__thread_gate = ThreadGate(num_threads)
    self.__num_threads = num_threads
    self.__threads = []
    # All workers start immediately; the ThreadGate controls how many of
    # them are actually allowed to pull work at a time.
    for i in xrange(num_threads):
      thread = worker_thread_factory(self, self.__thread_gate)
      if base_thread_name:
        base = base_thread_name
      else:
        base = thread.__class__.__name__
      thread.name = '%s-%d' % (base, i)
      self.__threads.append(thread)
      thread.start()
  def num_threads(self):
    """Return the number of threads in this thread pool."""
    return self.__num_threads
  def Threads(self):
    """Yields the registered threads."""
    for thread in self.__threads:
      yield thread
  def SubmitItem(self, item, block=True, timeout=0.0):
    """Submit a WorkItem to the AdaptiveThreadPool.

    Args:
      item: A WorkItem instance.
      block: Whether to block on submitting if the submit queue is full.
      timeout: Time wait for room in the queue if block is True, 0.0 to
        block indefinitely.

    Raises:
      Queue.Full if the submit queue is full.
    """
    self.requeue.put(item, block=block, timeout=timeout)
  def QueuedItemCount(self):
    """Returns the number of items currently in the queue."""
    return self.requeue.qsize()
  def Shutdown(self):
    """Shutdown the thread pool.

    Tasks may remain unexecuted in the submit queue.
    """
    # Drain any unprocessed items first so workers don't pick them up.
    while not self.requeue.empty():
      try:
        unused_item = self.requeue.get_nowait()
        self.requeue.task_done()
      except Queue.Empty:
        pass
    # One sentinel per worker guarantees each blocked get() wakes up;
    # EnableAllThreads then releases workers parked in StartWork().
    for thread in self.__threads:
      thread.exit_flag = True
      self.requeue.put(_THREAD_SHOULD_EXIT)
    self.__thread_gate.EnableAllThreads()
  def Wait(self):
    """Wait until all work items have been completed."""
    self.requeue.join()
  def JoinThreads(self):
    """Wait for all threads to exit."""
    for thread in self.__threads:
      logger.debug('Waiting for %s to exit' % str(thread))
      thread.join()
  def CheckErrors(self):
    """Output logs for any errors that occurred in the worker threads."""
    for thread in self.__threads:
      thread.CheckError()
class ThreadGate(object):
  """Manage the number of active worker threads.

  The ThreadGate limits the number of threads that are simultaneously
  active in order to implement adaptive rate control.

  Initially the ThreadGate allows only one thread to be active.  For
  each successful work item, another thread is activated and for each
  failed item, the number of active threads is reduced by one.  When only
  one thread is active, failures will cause exponential backoff.

  For example, a ThreadGate instance, thread_gate can be used in a number
  of threads as so:

  # Block until this thread is enabled for work.
  thread_gate.StartWork()
  try:
    status = DoSomeWorkInvolvingLimitedSharedResources()
    succeeded = IsStatusGood(status)
    badly_failed = IsStatusVeryBad(status)
  finally:
    if succeeded:
      # Succeeded, add more simultaneously enabled threads to the task.
      thread_gate.FinishWork(instruction=ThreadGate.INCREASE)
    elif badly_failed:
      # Failed, or succeeded but with high resource load, reduce number of
      # workers.
      thread_gate.FinishWork(instruction=ThreadGate.DECREASE)
    else:
      # We succeeded, but don't want to add more workers to the task.
      thread_gate.FinishWork(instruction=ThreadGate.HOLD)

  the thread_gate will enable and disable/backoff threads in response to
  resource load conditions.

  StartWork can block indefinitely.  FinishWork, while not
  lock-free, should never block absent a demonic scheduler.
  """
  INCREASE = 'increase'
  HOLD = 'hold'
  DECREASE = 'decrease'
  def __init__(self,
               num_threads,
               sleep=InterruptibleSleep):
    """Constructor for ThreadGate instances.

    Args:
      num_threads: The total number of threads using this gate.
      sleep: Used for dependency injection.
    """
    # Semaphore permits == number of threads currently allowed to work;
    # __enabled_count mirrors that number under __lock.
    self.__enabled_count = 1
    self.__lock = threading.Lock()
    self.__thread_semaphore = threading.Semaphore(self.__enabled_count)
    self.__num_threads = num_threads
    self.__backoff_time = 0
    self.__sleep = sleep
  def num_threads(self):
    return self.__num_threads
  def EnableThread(self):
    """Enable one more worker thread."""
    self.__lock.acquire()
    try:
      self.__enabled_count += 1
    finally:
      self.__lock.release()
    # Release outside the lock: adds one permit for the newly enabled thread.
    self.__thread_semaphore.release()
  def EnableAllThreads(self):
    """Enable all worker threads."""
    for unused_idx in xrange(self.__num_threads - self.__enabled_count):
      self.EnableThread()
  def StartWork(self):
    """Starts a critical section in which the number of workers is limited.

    Starts a critical section which allows self.__enabled_count
    simultaneously operating threads. The critical section is ended by
    calling self.FinishWork().
    """
    self.__thread_semaphore.acquire()
    # NOTE(review): __backoff_time is read here without holding __lock;
    # presumably a stale read is acceptable -- confirm.
    if self.__backoff_time > 0.0:
      if not threading.currentThread().exit_flag:
        logger.info('[%s] Backing off due to errors: %.1f seconds',
                    threading.currentThread().getName(),
                    self.__backoff_time)
        self.__sleep(self.__backoff_time)
  def FinishWork(self, instruction=None):
    """Ends a critical section started with self.StartWork()."""
    if not instruction or instruction == ThreadGate.HOLD:
      # Return this thread's own permit; concurrency unchanged.
      self.__thread_semaphore.release()
    elif instruction == ThreadGate.INCREASE:
      if self.__backoff_time > 0.0:
        logger.info('Resetting backoff to 0.0')
        self.__backoff_time = 0.0
      do_enable = False
      self.__lock.acquire()
      try:
        if self.__num_threads > self.__enabled_count:
          do_enable = True
          self.__enabled_count += 1
      finally:
        self.__lock.release()
      if do_enable:
        logger.debug('Increasing active thread count to %d',
                     self.__enabled_count)
        # Extra release grants a permit to the newly enabled thread.
        self.__thread_semaphore.release()
      # This release returns the finishing thread's own permit.
      self.__thread_semaphore.release()
    elif instruction == ThreadGate.DECREASE:
      do_disable = False
      self.__lock.acquire()
      try:
        if self.__enabled_count > 1:
          do_disable = True
          self.__enabled_count -= 1
        else:
          # Already at minimum concurrency: start/grow exponential backoff.
          if self.__backoff_time == 0.0:
            self.__backoff_time = INITIAL_BACKOFF
          else:
            self.__backoff_time *= BACKOFF_FACTOR
      finally:
        self.__lock.release()
      if do_disable:
        # Absorb this thread's permit (no release) to shrink concurrency.
        logger.debug('Decreasing the number of active threads to %d',
                     self.__enabled_count)
      else:
        self.__thread_semaphore.release()
class WorkItem(object):
  """A single unit of work executed by a WorkerThread.

  Subclasses implement PerformWork() and report one of the status
  constants below together with a ThreadGate instruction.
  """

  SUCCESS = 'success'
  RETRY = 'retry'
  FAILURE = 'failure'

  def __init__(self, name):
    # The name is only used for logging/debugging via str().
    self.__name = name

  def __str__(self):
    return self.__name

  def PerformWork(self, thread_pool):
    """Perform the work of this work item and report the results.

    Args:
      thread_pool: The AdaptiveThreadPool instance associated with this
        thread.

    Returns:
      A tuple (status, instruction) of the work status and an instruction
      for the ThreadGate.
    """
    raise NotImplementedError()
| mit |
hkariti/ansible | lib/ansible/modules/cloud/google/gce.py | 16 | 28171 | #!/usr/bin/python
# Copyright 2013 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
required: false
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
required: false
default: null
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
required: false
default: null
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:
version_added: "1.5.1"
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
default: null
required: false
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
required: false
default: null
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
required: false
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
required: false
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
required: false
default: null
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
required: false
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
preemptible:
version_added: "2.1"
description:
- if set to true, instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
required: false
default: "false"
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
required: false
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
- gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
- gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
    """Retrieves instance information from an instance object and returns it
    as a dictionary.

    Args:
        inst: a libcloud GCE node object (anything exposing .extra,
            .public_ips, .private_ips, .image, .size and .name).

    Returns:
        dict with 'image', 'disks', 'machine_type', 'metadata', 'name',
        'network', 'subnetwork', 'private_ip', 'public_ip', 'status',
        'tags' and 'zone' keys.
    """
    metadata = {}
    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
        for md in inst.extra['metadata']['items']:
            metadata[md['key']] = md['value']
    # Network and subnetwork are full resource URLs; keep only the trailing
    # name component.  The except clauses are narrowed from a bare 'except:'
    # so that unrelated errors (including KeyboardInterrupt/SystemExit) are
    # no longer silently swallowed.
    try:
        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    except (KeyError, IndexError, AttributeError, TypeError):
        netname = None
    try:
        subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except (KeyError, IndexError, AttributeError, TypeError):
        subnetname = None
    if 'disks' in inst.extra:
        # Report disk names ordered by their attachment index.
        disk_names = [disk_info['source'].split('/')[-1]
                      for disk_info
                      in sorted(inst.extra['disks'],
                                key=lambda disk_info: disk_info['index'])]
    else:
        disk_names = []
    if len(inst.public_ips) == 0:
        public_ip = None
    else:
        public_ip = inst.public_ips[0]
    return ({
        'image': inst.image is not None and inst.image.split('/')[-1] or None,
        'disks': disk_names,
        'machine_type': inst.size,
        'metadata': metadata,
        'name': inst.name,
        'network': netname,
        'subnetwork': subnetname,
        'private_ip': inst.private_ips[0],
        'public_ip': public_ip,
        'status': ('status' in inst.extra) and inst.extra['status'] or None,
        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
    })
def create_instances(module, gce, instance_names, number, lc_zone):
    """Creates new instances. Attributes other than instance_names are picked
    up from 'module'

    module : AnsibleModule object
    gce: authenticated GCE libcloud driver
    instance_names: python list of instance names to create
    number: number of instances to create
    lc_zone: GCEZone object

    Returns:
        A tuple (changed, instance_json_data, instance_names) where
        instance_json_data is a list of dictionaries with instance
        information about the instances that were launched.
    """
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    machine_type = module.params.get('machine_type')
    metadata = module.params.get('metadata')
    network = module.params.get('network')
    subnetwork = module.params.get('subnetwork')
    persistent_boot_disk = module.params.get('persistent_boot_disk')
    disks = module.params.get('disks')
    tags = module.params.get('tags')
    ip_forward = module.params.get('ip_forward')
    external_ip = module.params.get('external_ip')
    disk_auto_delete = module.params.get('disk_auto_delete')
    preemptible = module.params.get('preemptible')
    disk_size = module.params.get('disk_size')
    service_account_permissions = module.params.get('service_account_permissions')
    # Resolve the requested external IP: 'none' -> no external address,
    # 'ephemeral' -> passed through for GCE to pick one, anything else is a
    # static address given either as an IP literal or a reserved-address name.
    if external_ip == "none":
        instance_external_ip = None
    elif external_ip != "ephemeral":
        instance_external_ip = external_ip
        try:
            # check if instance_external_ip is an ip or a name
            try:
                socket.inet_aton(instance_external_ip)
                instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
            except socket.error:
                instance_external_ip = gce.ex_get_address(instance_external_ip)
        except GoogleBaseError as e:
            module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
    else:
        instance_external_ip = external_ip
    new_instances = []
    changed = False
    # Resolve the requested disks to libcloud volume objects; the first disk
    # (index 0) becomes the boot disk.
    lc_disks = []
    disk_modes = []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
            disk_modes.append(disk['mode'])
        else:
            lc_disks.append(gce.ex_get_volume(disk, lc_zone))
            # boot disk is implicitly READ_WRITE
            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    lc_network = gce.ex_get_network(network)
    lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
    # Try to convert the user's metadata value into the format expected
    # by GCE.  First try to ensure user has proper quoting of a
    # dictionary-like syntax using 'literal_eval', then convert the python
    # dict into a python list of 'key' / 'value' dicts.  Should end up
    # with:
    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
    if metadata:
        if isinstance(metadata, dict):
            md = metadata
        else:
            try:
                md = literal_eval(str(metadata))
                if not isinstance(md, dict):
                    raise ValueError('metadata must be a dict')
            except ValueError as e:
                module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError as e:
                module.fail_json(msg='bad metadata syntax')
        # Older libcloud (< 0.15) expects the {'items': [...]} wire format;
        # newer versions accept a plain dict.
        if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
            items = []
            for k, v in md.items():
                items.append({"key": k, "value": v})
            metadata = {'items': items}
        else:
            metadata = md
    # Image lookup is expensive, so defer it until actually needed.
    lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
    ex_sa_perms = []
    bad_perms = []
    if service_account_permissions:
        for perm in service_account_permissions:
            if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
                bad_perms.append(perm)
        if len(bad_perms) > 0:
            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
        ex_sa_perms.append({'email': "default"})
        ex_sa_perms[0]['scopes'] = service_account_permissions
    # These variables all have default values but check just in case
    if not lc_network or not lc_machine_type or not lc_zone:
        module.fail_json(msg='Missing required create instance variable',
                         changed=False)
    gce_args = dict(
        location=lc_zone,
        ex_network=network, ex_tags=tags, ex_metadata=metadata,
        ex_can_ip_forward=ip_forward,
        external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
        ex_service_accounts=ex_sa_perms
    )
    if preemptible is not None:
        gce_args['ex_preemptible'] = preemptible
    if subnetwork is not None:
        gce_args['ex_subnetwork'] = subnetwork
    if isinstance(instance_names, str) and not number:
        instance_names = [instance_names]
    if isinstance(instance_names, str) and number:
        # Bulk path: create 'number' nodes from a single base name in one
        # API call; failed nodes are re-fetched individually if they exist.
        instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
                                                          lc_image(), number, **gce_args)
        for resp in instance_responses:
            n = resp
            if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
                try:
                    n = gce.ex_get_node(n.name, lc_zone)
                except ResourceNotFoundError:
                    pass
            else:
                # Assure that at least one node has been created to set changed=True
                changed = True
            new_instances.append(n)
    else:
        # Per-name path: create (or fetch, if it already exists) each node.
        for instance in instance_names:
            pd = None
            if lc_disks:
                pd = lc_disks[0]
            elif persistent_boot_disk:
                try:
                    pd = gce.ex_get_volume("%s" % instance, lc_zone)
                except ResourceNotFoundError:
                    pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
            gce_args['ex_boot_disk'] = pd
            inst = None
            try:
                inst = gce.ex_get_node(instance, lc_zone)
            except ResourceNotFoundError:
                inst = gce.create_node(
                    instance, lc_machine_type, lc_image(), **gce_args
                )
                changed = True
            except GoogleBaseError as e:
                module.fail_json(msg='Unexpected error attempting to create ' +
                                 'instance %s, error: %s' % (instance, e.value))
            if inst:
                new_instances.append(inst)
    # Attach any additional requested disks, verifying that disks already
    # attached at the same index match the request.
    for inst in new_instances:
        for i, lc_disk in enumerate(lc_disks):
            # Check whether the disk is already attached
            if (len(inst.extra['disks']) > i):
                attached_disk = inst.extra['disks'][i]
                if attached_disk['source'] != lc_disk.extra['selfLink']:
                    module.fail_json(
                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
                elif attached_disk['mode'] != disk_modes[i]:
                    module.fail_json(
                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
                            i, disk_modes[i], attached_disk['mode'])))
                else:
                    continue
            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
            # Work around libcloud bug: attached volumes don't get added
            # to the instance metadata. get_instance_info() only cares about
            # source and index.
            if len(inst.extra['disks']) != i + 1:
                inst.extra['disks'].append(
                    {'source': lc_disk.extra['selfLink'], 'index': i})
    instance_names = []
    instance_json_data = []
    for inst in new_instances:
        d = get_instance_info(inst)
        instance_names.append(d['name'])
        instance_json_data.append(d)
    return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
    """Changes the state of a list of instances. For example,
    change from started to stopped, or started to absent.

    module: Ansible module object
    gce: authenticated GCE connection object
    instance_names: a single base name (with 'number') or a list of
        instance names to transition
    number: when set with a base name, the count of numbered instances
    zone: GCEZone object where the instances reside prior to termination
    state: 'state' parameter passed into module as argument

    Returns a tuple (changed, names) with the instance names that were
    processed.
    """
    # Expand the caller-supplied spec into a concrete list of node names.
    if isinstance(instance_names, str):
        if number:
            node_names = ['%s-%03d' % (instance_names, idx) for idx in range(number)]
        else:
            node_names = [instance_names]
    else:
        node_names = instance_names

    changed = False
    found_nodes = []
    state_instance_names = []
    for node_name in node_names:
        node = None
        try:
            node = gce.ex_get_node(node_name, zone)
        except ResourceNotFoundError:
            # Already gone: nothing to do, but still report the name back.
            state_instance_names.append(node_name)
        except Exception as e:
            module.fail_json(msg=unexpected_error_msg(e), changed=False)
        else:
            found_nodes.append(node)
            state_instance_names.append(node_name)

    if state in ['absent', 'deleted'] and number:
        # Numbered clusters are torn down with a single bulk call; 'changed'
        # is true if any individual deletion succeeded.
        results = gce.ex_destroy_multiple_nodes(found_nodes) or [False]
        changed = reduce(lambda x, y: x or y, results)
    else:
        for node in found_nodes:
            if state in ['absent', 'deleted']:
                gce.destroy_node(node)
                changed = True
            elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
                gce.ex_start_node(node)
                changed = True
            elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
                gce.ex_stop_node(node)
                changed = True
    return (changed, state_instance_names)
def main():
    """Ansible entry point: parse module arguments and create, delete,
    start or stop GCE instances accordingly, then exit with JSON results."""
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(default='debian-8'),
            image_family=dict(),
            external_projects=dict(type='list'),
            instance_names=dict(),
            machine_type=dict(default='n1-standard-1'),
            metadata=dict(),
            name=dict(aliases=['base_name']),
            num_instances=dict(type='int'),
            network=dict(default='default'),
            subnetwork=dict(),
            persistent_boot_disk=dict(type='bool', default=False),
            disks=dict(type='list'),
            state=dict(choices=['active', 'present', 'absent', 'deleted',
                                'started', 'stopped', 'terminated'],
                       default='present'),
            tags=dict(type='list'),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            service_account_permissions=dict(type='list'),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(),
            ip_forward=dict(type='bool', default=False),
            external_ip=dict(default='ephemeral'),
            disk_auto_delete=dict(type='bool', default=True),
            disk_size=dict(type='int', default=10),
            preemptible=dict(type='bool', default=None),
        ),
        mutually_exclusive=[('instance_names', 'name')]
    )
    if not HAS_PYTHON26:
        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
    gce = gce_connect(module)
    image = module.params.get('image')
    image_family = module.params.get('image_family')
    external_projects = module.params.get('external_projects')
    instance_names = module.params.get('instance_names')
    name = module.params.get('name')
    number = module.params.get('num_instances')
    subnetwork = module.params.get('subnetwork')
    state = module.params.get('state')
    zone = module.params.get('zone')
    preemptible = module.params.get('preemptible')
    changed = False
    # 'inames' ends up either a list of names (from instance_names) or the
    # single base-name string (from name, optionally expanded via number).
    inames = None
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames = name
    if not inames:
        module.fail_json(msg='Must specify a "name" or "instance_names"',
                         changed=False)
    if not zone:
        module.fail_json(msg='Must specify a "zone"', changed=False)
    lc_zone = get_valid_location(module, gce, zone)
    # Feature gates: these options need newer libcloud releases.
    if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
        module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
                         changed=False)
    if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
                         changed=False)
    json_output = {'zone': zone}
    if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
        json_output['state'] = state
        (changed, state_instance_names) = change_instance_state(
            module, gce, inames, number, lc_zone, state)
        # based on what user specified, return the same variable, although
        # value could be different if an instance could not be destroyed
        # NOTE(review): operator precedence makes this parse as
        # 'instance_names or (name and number)' -- confirm that is intended.
        if instance_names or name and number:
            json_output['instance_names'] = state_instance_names
        elif name:
            json_output['name'] = name
    elif state in ['active', 'present']:
        json_output['state'] = 'present'
        (changed, instance_data, instance_name_list) = create_instances(
            module, gce, inames, number, lc_zone)
        json_output['instance_data'] = instance_data
        if instance_names:
            json_output['instance_names'] = instance_name_list
        elif name:
            json_output['name'] = name
    json_output['changed'] = changed
    module.exit_json(**json_output)
class LazyDiskImage:
    """
    Object for lazy instantiation of a disk image.

    gce.ex_get_image is a very expensive call, so we want to avoid calling
    it as much as possible: the lookup is deferred until the object is
    first invoked and the result is memoised for later calls.
    """
    def __init__(self, module, gce, name, has_pd, family=None, projects=None):
        self.image = None
        self.was_called = False
        self.gce = gce
        self.name = name
        self.has_pd = has_pd
        self.module = module
        self.family = family
        self.projects = projects

    def __call__(self):
        # Fast path: every call after the first returns the cached result.
        if self.was_called:
            return self.image
        self.was_called = True
        if not self.has_pd:
            # A persistent disk supplies its own image; otherwise resolve
            # either by image family or by explicit image name.
            if self.family:
                self.image = self.gce.ex_get_image_from_family(
                    self.family, ex_project_list=self.projects)
            else:
                self.image = self.gce.ex_get_image(
                    self.name, ex_project_list=self.projects)
            if not self.image:
                self.module.fail_json(msg='image or disks missing for create instance', changed=False)
        return self.image
# Standard Ansible module entry point: run main() only when the module
# file is executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
zedr/django | tests/utils_tests/test_ipv6.py | 67 | 2831 | from __future__ import unicode_literals
import unittest
from django.utils.ipv6 import is_valid_ipv6_address, clean_ipv6_address
class TestUtilsIPv6(unittest.TestCase):
    """Validation and normalisation tests for django.utils.ipv6."""

    def test_validates_correct_plain_address(self):
        for address in ('fe80::223:6cff:fe8a:2e8a',
                        '2a02::223:6cff:fe8a:2e8a',
                        '1::2:3:4:5:6:7',
                        '::',
                        '::a',
                        '2::'):
            self.assertTrue(is_valid_ipv6_address(address))

    def test_validates_correct_with_v4mapping(self):
        for address in ('::ffff:254.42.16.14',
                        '::ffff:0a0a:0a0a'):
            self.assertTrue(is_valid_ipv6_address(address))

    def test_validates_incorrect_plain_address(self):
        for address in ('foo',
                        '127.0.0.1',
                        '12345::',
                        '1::2:3::4',
                        '1::zzz',
                        '1::2:3:4:5:6:7:8',
                        '1:2',
                        '1:::2'):
            self.assertFalse(is_valid_ipv6_address(address))

    def test_validates_incorrect_with_v4mapping(self):
        self.assertFalse(is_valid_ipv6_address('::ffff:999.42.16.14'))
        self.assertFalse(is_valid_ipv6_address('::ffff:zzzz:0a0a'))
        # The ::1.2.3.4 format used to be valid but was deprecated
        # in rfc4291 section 2.5.5.1
        self.assertTrue(is_valid_ipv6_address('::254.42.16.14'))
        self.assertTrue(is_valid_ipv6_address('::0a0a:0a0a'))
        self.assertFalse(is_valid_ipv6_address('::999.42.16.14'))
        self.assertFalse(is_valid_ipv6_address('::zzzz:0a0a'))

    def test_cleanes_plain_address(self):
        # Lower-casing plus zero-run compression.
        self.assertEqual(clean_ipv6_address('DEAD::0:BEEF'), 'dead::beef')
        self.assertEqual(clean_ipv6_address('2001:000:a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')
        self.assertEqual(clean_ipv6_address('2001::a:0000:0:fe:fe:beef'), '2001:0:a::fe:fe:beef')

    def test_cleanes_with_v4_mapping(self):
        # v4-mapped addresses are rendered in dotted-quad form.
        self.assertEqual(clean_ipv6_address('::ffff:0a0a:0a0a'), '::ffff:10.10.10.10')
        self.assertEqual(clean_ipv6_address('::ffff:1234:1234'), '::ffff:18.52.18.52')
        self.assertEqual(clean_ipv6_address('::ffff:18.52.18.52'), '::ffff:18.52.18.52')

    def test_unpacks_ipv4(self):
        # unpack_ipv4=True strips the ::ffff: prefix entirely.
        self.assertEqual(clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True), '10.10.10.10')
        self.assertEqual(clean_ipv6_address('::ffff:1234:1234', unpack_ipv4=True), '18.52.18.52')
        self.assertEqual(clean_ipv6_address('::ffff:18.52.18.52', unpack_ipv4=True), '18.52.18.52')
| bsd-3-clause |
KDD-OpenSource/fexum | features/tests/test_tasks.py | 1 | 15890 | from os import stat
from time import time
from unittest.mock import patch, call
import SharedArray as sa
from django.test import TestCase
from features.models import Feature, Bin, Dataset, Slice, Redundancy, Relevancy, \
Spectrogram
from features.models import ResultCalculationMap, Calculation
from features.tasks import _dataframe_columns, _dataframe_last_access, _get_dataframe
from features.tasks import initialize_from_dataset, build_histogram, \
calculate_feature_statistics, calculate_hics, calculate_densities, remove_unused_dataframes, \
build_spectrogram
from features.tasks import get_samples, calculate_conditional_distributions
from features.tests.factories import FeatureFactory, DatasetFactory, ResultCalculationMapFactory, CalculationFactory
# TODO: test for results
def _build_test_dataset() -> Dataset:
    """Create a Dataset fixture with three features named Col1..Col3."""
    dataset = DatasetFactory()
    for column_name in ('Col1', 'Col2', 'Col3'):
        FeatureFactory(name=column_name, dataset=dataset)
    return dataset
class TestInitializeFromDatasetTask(TestCase):
    def test_initialize_from_dataset(self):
        """initialize_from_dataset must create one Feature per column and
        schedule the per-feature preprocessing subtasks (histogram,
        statistics, spectrogram) chained into a chord callback."""
        dataset = DatasetFactory()
        feature_names = ['Col1', 'Col2', 'Col3']
        # One flat multi-manager with-statement replaces the previous
        # five-level nesting of patch() context managers.
        with patch('features.tasks.build_histogram.subtask') as build_histogram_mock, \
                patch('features.tasks.calculate_feature_statistics.subtask') as calculate_feature_statistics_mock, \
                patch('features.tasks.build_spectrogram.subtask') as build_spectrogram_mock, \
                patch('features.tasks.initialize_from_dataset_processing_callback.subtask') as initialize_from_dataset_processing_callback_mock, \
                patch('features.tasks.chord') as chord_mock:
            initialize_from_dataset(dataset_id=dataset.id)

            # Make sure that we call the preprocessing task for each feature
            features = Feature.objects.filter(name__in=feature_names).all()
            kalls = [call(immutable=True, kwargs={'feature_id': feature.id})
                     for feature in features]
            build_histogram_mock.assert_has_calls(kalls, any_order=True)
            calculate_feature_statistics_mock.assert_has_calls(kalls, any_order=True)
            build_spectrogram_mock.assert_has_calls(kalls, any_order=True)
            initialize_from_dataset_processing_callback_mock.assert_called_once_with(
                kwargs={'dataset_id': dataset.id})
            chord_mock.assert_called_once()
        # The task persists one Feature row per column name.
        self.assertEqual(feature_names,
                         [feature.name for feature in Feature.objects.all()])
class TestBuildHistogramTask(TestCase):
    def test_build_histogram(self):
        """build_histogram should persist one Bin per requested bucket."""
        dataset = _build_test_dataset()
        feature = Feature.objects.get(dataset=dataset, name='Col2')
        expected_counts = [6, 1, 4, 0, 2]

        build_histogram(feature_id=feature.id, bins=len(expected_counts))

        # Rudimentary check: verify only the bin count and bin values,
        # not the exact value-to-bucket mapping.
        self.assertEqual(Bin.objects.count(), len(expected_counts))
        for histogram_bin in Bin.objects.all():
            self.assertEqual(histogram_bin.feature, feature)
            self.assertIn(histogram_bin.count, expected_counts)
class TestCalculateDensities(TestCase):
    def test_calculate_densities(self):
        """One density entry per target category; values in a sane range."""
        dataset = _build_test_dataset()
        feature = Feature.objects.get(dataset=dataset, name='Col2')
        target = Feature.objects.get(dataset=dataset, name='Col3')
        target.categories = [0, 1, 2]
        target.save()

        densities = calculate_densities(str(target.id), str(feature.id))

        self.assertIsInstance(densities, list)
        self.assertEqual(len(densities), len(target.categories))

        checked_class = 1.0
        self.assertIn(checked_class,
                      [entry['target_class'] for entry in densities])
        density_values = next(entry['density_values'] for entry in densities
                              if entry['target_class'] == checked_class)

        # Kernel density is not deterministic, therefore we only check
        # result length and a plausible value range.
        self.assertEqual(len(density_values), 100)
        for value in density_values:
            self.assertGreater(value, 0.15)
            self.assertLess(value, 0.6)
class TestGetSamples(TestCase):
    def test_get_samples(self):
        """get_samples returns {feature_id: values} capped at max_samples."""
        dataset = _build_test_dataset()
        feature = Feature.objects.get(dataset=dataset, name='Col2')

        samples = get_samples(feature.id, 5)

        expected_values = [-0.24040447000000001, 0.74163977000000003,
                           -0.046074360000000002, -1.3395821000000001,
                           -0.30984600000000001]
        self.assertEqual(samples, {str(feature.id): expected_values})
class TestCalculateFeatureStatistics(TestCase):
    def _run_statistics(self, dataset, name):
        """Run the statistics task for the named feature and reload it."""
        feature = Feature.objects.get(dataset=dataset, name=name)
        calculate_feature_statistics(feature_id=feature.id)
        return Feature.objects.get(id=feature.id)

    def test_calculate_feature_statistics(self):
        """A continuous column gets numeric stats and no categories."""
        feature = self._run_statistics(_build_test_dataset(), 'Col2')
        self.assertEqual(feature.mean, -0.2838365385)
        self.assertEqual(feature.variance, 0.406014248150876)
        self.assertEqual(feature.min, -1.3975821)
        self.assertEqual(feature.max, 0.74163977)
        self.assertEqual(feature.is_categorical, False)
        self.assertEqual(feature.categories, None)

    def test_calculate_feature_statistics_is_categorical(self):
        """A low-cardinality column is detected as categorical."""
        feature = self._run_statistics(_build_test_dataset(), 'Col3')
        self.assertEqual(feature.mean, 0.9)
        self.assertEqual(feature.variance, 0.69)
        self.assertEqual(feature.min, 0)
        self.assertEqual(feature.max, 2.0)
        self.assertEqual(feature.categories, [0, 1, 2])
        self.assertEqual(feature.is_categorical, True)
class TestCalculateHics(TestCase):
    """End-to-end tests for the calculate_hics task in its three modes:
    bivariate, fixed feature set, and feature super set.

    Each test builds the three-column fixture dataset, runs the task, and
    then checks the persisted Relevancy/Slice/Redundancy/Calculation rows.
    """

    def test_calculate_incremental_hics(self):
        # TODO: incremental HiCS calculation is not covered yet.
        pass

    def test_calculate_bivariate_hics(self):
        """Two bivariate runs yield one relevancy and one slice per
        feature, plus a single redundancy between the feature pair."""
        dataset = _build_test_dataset()
        feature1 = Feature.objects.get(dataset=dataset, name='Col1')
        feature2 = Feature.objects.get(dataset=dataset, name='Col2')
        target = Feature.objects.get(dataset=dataset, name='Col3')
        result_calculation_map = ResultCalculationMapFactory(target=target)
        calculation = CalculationFactory(result_calculation_map=result_calculation_map, max_iteration=2, type=Calculation.DEFAULT_HICS)
        features = [feature1, feature2]
        # Select first feature as target
        calculate_hics(calculation_id=calculation.id, bivariate=True, calculate_redundancies=True)
        calculate_hics(calculation_id=calculation.id, bivariate=True, calculate_redundancies=True)
        # Result
        self.assertEqual(ResultCalculationMap.objects.count(), 1)
        # Relevancies: each must reference the target and one of the
        # two input features; iterations accumulate across the two runs.
        for relevancy in Relevancy.objects.all():
            self.assertIsNotNone(relevancy.relevancy)
            self.assertEqual(relevancy.result_calculation_map.target, target)
            self.assertEqual(relevancy.iteration, 10)
            self.assertIn(relevancy.features.first(), [feature1, feature2])
        self.assertEqual(Relevancy.objects.filter(features=feature1).count(), 1)
        self.assertEqual(Relevancy.objects.filter(features=feature2).count(), 1)
        # Slices: non-empty definitions, correct target, one per feature.
        for fslice in Slice.objects.all():
            self.assertNotEqual(fslice.object_definition, [])
            self.assertNotEqual(fslice.output_definition, [])
            self.assertEqual(fslice.result_calculation_map.target, target)
            self.assertIn(fslice.features.first(), features)
        self.assertEqual(Slice.objects.filter(features=feature1).count(), 1)
        self.assertEqual(Slice.objects.filter(features=feature2).count(), 1)
        # Redundancies: exactly one, linking the two features in either order.
        self.assertEqual(Redundancy.objects.count(), 1)
        self.assertTrue((Redundancy.objects.first().first_feature == feature1 and Redundancy.objects.first().second_feature == feature2)
                        or (Redundancy.objects.first().second_feature == feature1 and Redundancy.objects.first().first_feature == feature2))
        # self.assertEqual(Redundancy.objects.first().redundancy, 1)
        # Calculation: the last calculation for this target finished all
        # its iterations and kept its type.
        calculation = Calculation.objects.filter(result_calculation_map=ResultCalculationMap.objects.get(target=target)).last()
        self.assertIsNotNone(calculation)
        self.assertEqual(calculation.current_iteration, calculation.max_iteration)
        self.assertEqual(calculation.type, Calculation.DEFAULT_HICS)

    def test_calculate_feature_set_hics(self):
        """A fixed-feature-set run yields exactly one relevancy and one
        slice covering both requested features together."""
        dataset = _build_test_dataset()
        feature1 = Feature.objects.get(dataset=dataset, name='Col1')
        feature2 = Feature.objects.get(dataset=dataset, name='Col2')
        target = Feature.objects.get(dataset=dataset, name='Col3')
        result_calculation_map = ResultCalculationMapFactory(target=target)
        calculation = CalculationFactory(result_calculation_map=result_calculation_map, max_iteration=1, type=Calculation.FIXED_FEATURE_SET_HICS)
        features = [feature1, feature2]
        feature_ids = {feature1.id, feature2.id}
        calculate_hics(calculation_id=calculation.id, bivariate=False, feature_ids=feature_ids)
        # Relevancy: chained filters select the row containing BOTH features.
        relevancy_features = Relevancy.objects.filter(features=feature1)
        relevancy_features = relevancy_features.filter(features=feature2)
        self.assertEqual(relevancy_features.count(), Relevancy.objects.count())
        self.assertEqual(relevancy_features.count(), 1)
        self.assertEqual(relevancy_features.first().iteration, 5)
        self.assertEqual(relevancy_features.first().result_calculation_map.target, target)
        self.assertIsNotNone(relevancy_features.first().relevancy)
        for feature in relevancy_features.first().features.all():
            self.assertIn(feature, features)
        # Slices: same both-features filter, one row with non-empty definitions.
        slices_features = Slice.objects.filter(features=feature1)
        slices_features = slices_features.filter(features=feature2)
        self.assertEqual(slices_features.count(), Slice.objects.count())
        self.assertEqual(slices_features.count(), 1)
        self.assertEqual(slices_features.first().result_calculation_map.target, target)
        self.assertNotEqual(slices_features.first().output_definition, [])
        self.assertNotEqual(slices_features.first().object_definition, [])
        # Calculation
        calculation = Calculation.objects.filter(result_calculation_map=ResultCalculationMap.objects.get(target=target)).last()
        self.assertIsNotNone(calculation)
        self.assertEqual(calculation.current_iteration, calculation.max_iteration)
        self.assertEqual(calculation.type, Calculation.FIXED_FEATURE_SET_HICS)

    def test_calculate_super_set_hics(self):
        """Superset mode produces results for supersets of the seed
        feature; the per-result iteration counts must sum to 10."""
        dataset = _build_test_dataset()
        feature1 = Feature.objects.get(dataset=dataset, name='Col1')
        feature2 = Feature.objects.get(dataset=dataset, name='Col2')
        target = Feature.objects.get(dataset=dataset, name='Col3')
        result_calculation_map = ResultCalculationMapFactory(target=target)
        calculation = CalculationFactory(result_calculation_map=result_calculation_map, max_iteration=1, type=Calculation.FEATURE_SUPER_SET_HICS)
        feature_ids = {feature1.id}
        calculate_hics(calculation_id=calculation.id, bivariate=False, feature_ids=feature_ids, calculate_supersets=True)
        # Relevancy: every relevancy involves the seed feature.
        relevancy_supersets = Relevancy.objects.filter(features=feature1)
        self.assertEqual(relevancy_supersets.count(), Relevancy.objects.count())
        self.assertGreater(relevancy_supersets.count(), 0)
        iteration_sum = 0
        for relevancy in relevancy_supersets.all():
            iteration_sum += relevancy.iteration
            self.assertEqual(relevancy.result_calculation_map.target, target)
            self.assertIsNotNone(relevancy.relevancy)
        self.assertEqual(iteration_sum, 10)
        # Slices: every slice involves the seed feature.
        slices_supersets = Slice.objects.filter(features=feature1)
        self.assertEqual(slices_supersets.count(), Slice.objects.count())
        self.assertGreater(slices_supersets.count(), 0)
        for fslices in slices_supersets.all():
            self.assertEqual(fslices.result_calculation_map.target, target)
            self.assertNotEqual(fslices.output_definition, [])
            self.assertNotEqual(fslices.object_definition, [])
        # Calculation
        calculation = Calculation.objects.filter(result_calculation_map=ResultCalculationMap.objects.get(target=target)).last()
        self.assertIsNotNone(calculation)
        self.assertEqual(calculation.current_iteration, calculation.max_iteration)
        self.assertEqual(calculation.type, Calculation.FEATURE_SUPER_SET_HICS)
class TestRemoveUnusedDatasets(TestCase):
    def test_remove_unused_datasets(self):
        """An eviction with max_delta=0 must drop the dataframe from every
        bookkeeping structure and from shared memory."""
        dataset = _build_test_dataset()
        key = str(dataset.id)

        # Manually load the dataframe into memory.
        _get_dataframe(dataset.id)

        # Loaded state: recent access timestamp, known columns, and a
        # matching SharedArray segment.
        self.assertLess(_dataframe_last_access[key], time())
        self.assertGreater(_dataframe_last_access[key], time() - 60)
        self.assertEqual(list(_dataframe_columns[key]),
                         [feature.name for feature in dataset.feature_set.all()])
        self.assertIn(key, [entry.name.decode('ascii') for entry in sa.list()])

        remove_unused_dataframes(max_delta=0)

        # Evicted state: all traces are gone.
        self.assertNotIn(key, _dataframe_last_access)
        self.assertNotIn(key, _dataframe_columns)
        self.assertNotIn(key, [entry.name.decode('ascii') for entry in sa.list()])
class TestBuildSpectrogram(TestCase):
    def test_build_spectrogram(self):
        """build_spectrogram persists a Spectrogram with the requested
        dimensions and a stable image file size."""
        spectrogram_width = 10
        spectrogram_height = 34
        dataset = _build_test_dataset()
        feature = Feature.objects.get(dataset=dataset, name='Col1')

        build_spectrogram(feature.id, height=spectrogram_height, width=spectrogram_width)

        spectrogram = Spectrogram.objects.get(feature=feature)
        self.assertEqual(spectrogram.feature, feature)
        self.assertEqual(spectrogram.width, spectrogram_width)
        self.assertEqual(spectrogram.height, spectrogram_height)
        # NOTE(review): assumes the rendered image is byte-stable across
        # environments (699 bytes) — confirm if rendering libs change.
        self.assertEqual(stat(spectrogram.image.name).st_size, 699)
# Placeholder: arbitrary-slice calculation has no coverage yet.
# NOTE(review): class name misspells "Arbitrary"; kept as-is because test
# discovery only depends on the "Test" prefix.
class TestCalculateArbitarySlices(TestCase):
    pass
class TestCalculateConditionalDistributions(TestCase):
    def test_calculate_conditional_distributions(self):
        """Constrain Col1 to categories {0, 1} and Col2 to a narrow range,
        then check both the resulting target distribution and the capped
        per-feature samples against exact expected values."""
        dataset = _build_test_dataset()
        feature1 = Feature.objects.get(dataset=dataset, name='Col1')
        feature2 = Feature.objects.get(dataset=dataset, name='Col2')
        target = Feature.objects.get(dataset=dataset, name='Col3')
        # One categorical constraint and one numeric range constraint.
        feature_constraints = [{'feature': feature1.id, 'categories': [0, 1]},
                               {'feature': feature2.id, 'range': {'from_value': -0.1, 'to_value': 0.2}}]
        max_samples = 2
        distributions_and_samples = calculate_conditional_distributions(target.id, feature_constraints, max_samples)
        # With these constraints the target collapses to value 0.0 with
        # probability 1.0; samples are keyed by stringified feature id.
        self.assertEqual(distributions_and_samples,
                         {'distribution': [{'value': 0.0, 'probability': 1.0}],
                          'samples': {
                              str(feature1.id): [0.0, 0.0],
                              str(feature2.id): [-0.046074360000000002, -0.047435999999999999],
                              str(target.id): [0.0, 0.0]}
                          })
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.