repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
vrsys/avango | avango-blender/blender-json-loader/application.py | 2 | 3250 | from examples_common.GuaVE import GuaVE
import avango.gua
class Application:
    """Wires a Blender-exported JSON scene into an avango-gua viewer.

    Owns the viewer, scenegraph, window/screen/camera plumbing and a
    registry of field containers whose field connections can be planned
    first and applied later (see plan_field_connection /
    apply_field_connections).
    """

    def __init__(self):
        self.viewer = avango.gua.nodes.Viewer(Name='Viewer')
        self.scenegraph = avango.gua.nodes.SceneGraph(Name='SceneGraph')
        # Created in basic_setup() / injected via set_camera().
        self.window = None
        self.screen = None
        self.camera = None
        # Field containers keyed by their avango Name field value.
        self.field_containers = {}
        # (from_container, from_field, to_container, to_field) tuples queued
        # until apply_field_connections() is called.
        self.planned_field_connections = []
        # All loaded content hangs below this extra root node.
        self.root = avango.gua.nodes.TransformNode(
            Name="Custom_Root",
        )
        self.scenegraph.Root.value.Children.value.append(self.root)

    def run(self):
        """Open the interactive GuaVE shell and enter the render loop."""
        guaVE = GuaVE()
        guaVE.start(locals(), globals(), show_banner=False)
        # Render one warm-up frame with frustum culling disabled so every
        # node is processed at least once, then restore the old setting.
        tmp = self.camera.EnableFrustumCulling.value
        self.camera.EnableFrustumCulling.value = False
        self.viewer.frame()
        self.camera.EnableFrustumCulling.value = tmp
        self.viewer.run()

    def basic_setup(self):
        """Create the window and attach screen/camera to viewer and graph.

        NOTE(review): expects set_camera() to have been called and
        self.screen to be assigned elsewhere; with both still None the
        first statements below raise AttributeError — confirm the intended
        call order with the loader.
        """
        size = avango.gua.Vec2ui(1600, 900)
        # self.camera = avango.gua.nodes.CameraNode(
        #     LeftScreenPath = "/screen",
        #     SceneGraph = "SceneGraph",
        #     Resolution = size,
        #     OutputWindowName = "window",
        #     Transform = avango.gua.make_trans_mat(0.0, 0.0, 3.5)
        # )
        # self.screen = avango.gua.nodes.ScreenNode(
        #     Name = "screen",
        #     Width = 2,
        #     Height = 1.5,
        #     Children = [self.camera],
        #     Transform = avango.gua.make_trans_mat(0.0, 0.0, 15.0)
        # )
        self.camera.LeftScreenPath.value = self.screen.Path.value
        self.scenegraph.Root.value.Children.value.append(self.screen)
        self.window = avango.gua.nodes.GlfwWindow(
            Size=size,
            LeftResolution=size,
        )
        # The registered name must match the camera's OutputWindowName.
        avango.gua.register_window("window", self.window)
        self.viewer.SceneGraphs.value = [self.scenegraph]
        self.viewer.Windows.value = [self.window]

    def add_field_container(self, field_container):
        """Register *field_container* under its Name for later lookups."""
        self.field_containers[field_container.Name.value] = field_container

    def add_field_connection(
            self,
            from_container_name,
            from_field_name,
            to_container_name,
            to_field_name):
        """Immediately connect one registered container's field to another.

        Raises KeyError when either container name is unknown.
        """
        to_field =\
            self.field_containers[to_container_name].get_field(to_field_name)
        from_field = self.field_containers[from_container_name].\
            get_field(from_field_name)
        print(("add field connection", to_field_name))
        to_field.connect_from(from_field)

    def plan_field_connection(
            self,
            from_container_name,
            from_field_name,
            to_container_name,
            to_field_name):
        """Queue a connection for apply_field_connections().

        Container names have '.' normalised to '_' to match the naming
        scheme used when containers are registered.
        """
        from_container_name = from_container_name.replace('.', '_')
        to_container_name = to_container_name.replace('.', '_')
        self.planned_field_connections.append((
            from_container_name,
            from_field_name,
            to_container_name,
            to_field_name
        ))

    def apply_field_connections(self):
        """Realise all queued connections, then clear the queue."""
        for fc in self.planned_field_connections:
            self.add_field_connection(fc[0], fc[1], fc[2], fc[3])
        self.planned_field_connections = []

    def set_camera(self, camera):
        # The camera is created by the external loader and injected here.
        self.camera = camera
| lgpl-3.0 |
SaikWolf/gnuradio | gr-filter/python/filter/qa_fractional_interpolator.py | 47 | 3147 | #!/usr/bin/env python
#
# Copyright 2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_f(samp_rate, freq, amp, N):
    """Return N samples of a real sine wave of *freq* Hz sampled at
    *samp_rate* Hz, scaled by *amp*.

    Fixes two issues in the original: the *amp* argument was accepted but
    ignored (all call sites in this file pass amp=1, so their behaviour is
    unchanged), and map()/xrange() was Python 2 only / lazy under
    Python 3 — the list comprehension returns a concrete list everywhere.
    """
    w = 2. * math.pi * freq
    return [amp * math.sin(w * (float(i) / samp_rate)) for i in range(N)]
def sig_source_c(samp_rate, freq, amp, N):
    """Return N samples of a complex exponential exp(j*2*pi*freq*t) at
    sample rate *samp_rate*, scaled by *amp*.

    Fixes two issues in the original: the *amp* argument was accepted but
    ignored (all call sites in this file pass amp=1, so their behaviour is
    unchanged), and map()/xrange() was Python 2 only / lazy under
    Python 3 — the list comprehension returns a concrete list everywhere.
    """
    w = 2. * math.pi * freq
    return [amp * (math.cos(w * (float(i) / samp_rate)) +
                   1j * math.sin(w * (float(i) / samp_rate)))
            for i in range(N)]
class test_fractional_resampler(gr_unittest.TestCase):
    """QA for filter.fractional_interpolator_{ff,cc}: resample a sine /
    complex exponential and compare the tail of the output against the
    analytically expected waveform (the head is skipped so filter
    transients are ignored)."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001_ff(self):
        N = 10000     # number of samples to use
        fs = 1000     # baseband sampling rate
        rrate = 1.123 # resampling rate
        freq = 10
        data = sig_source_f(fs, freq, 1, N)
        signal = blocks.vector_source_f(data)
        op = filter.fractional_interpolator_ff(0, rrate)
        snk = blocks.vector_sink_f()
        self.tb.connect(signal, op, snk)
        self.tb.run()
        Ntest = 5000
        L = len(snk.data())
        # Time base of the resampled stream (Python 2 idiom: map/xrange).
        t = map(lambda x: float(x)/(fs/rrate), xrange(L))
        # Fixed phase offset introduced by the interpolating filter.
        phase = 0.1884
        expected_data = map(lambda x: math.sin(2.*math.pi*freq*x+phase), t)
        dst_data = snk.data()
        # Compare only the last Ntest samples, to 3 decimal places.
        self.assertFloatTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 3)

    def test_002_cc(self):
        N = 10000     # number of samples to use
        fs = 1000     # baseband sampling rate
        rrate = 1.123 # resampling rate
        freq = 10
        data = sig_source_c(fs, freq, 1, N)
        signal = blocks.vector_source_c(data)
        op = filter.fractional_interpolator_cc(0.0, rrate)
        snk = blocks.vector_sink_c()
        self.tb.connect(signal, op, snk)
        self.tb.run()
        Ntest = 5000
        L = len(snk.data())
        # Time base of the resampled stream (Python 2 idiom: map/xrange).
        t = map(lambda x: float(x)/(fs/rrate), xrange(L))
        # Fixed phase offset introduced by the interpolating filter.
        phase = 0.1884
        expected_data = map(lambda x: math.cos(2.*math.pi*freq*x+phase) + \
                            1j*math.sin(2.*math.pi*freq*x+phase), t)
        dst_data = snk.data()
        # Compare only the last Ntest samples, to 3 decimal places.
        self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 3)
if __name__ == '__main__':
    # Run the QA suite under GNU Radio's unittest wrapper (writes XML results).
    gr_unittest.run(test_fractional_resampler, "test_fractional_resampler.xml")
| gpl-3.0 |
rossweinstein/Evolutionary-Computing-Python | src/main/Main.py | 1 | 1131 | from src.ecSystem.ECSystem import ECSystem
from src.ecSystem.ECSystemParameters import ECSystemParameters
# Where we actually run our EC System: configure parameters, build the
# system and evolve expressions against the training data below.
params = ECSystemParameters()

# Governs the number of expressions in each generation
params.generation_size = 200

# Governs the length of the expressions in the initial population
params.genome_size = 15

# The percentage of the population selected for the next generation
params.fitness_threshold = 0.2

# If our fitness is not improving over this set number of generations, the EC System reboots
params.stagnation_threshold = 30

# The percentage of the population selected for mutation
params.mutation_percentage = .1

# Minimum fitness value required for the system to deem the expression equivalent to training data
params.success_threshold = 0.01

# Training Data: the x and y values used to evaluate the expression's fitness
params.x_training_data = [-55.0, -35.0, -11.0, -1.0, 1.0, 19.0, 87.0, 101.0]
params.y_training_data = [1512.0, 612.0, 60, 0.0, 0.0, 180.0, 3784, 5100.0]

ec_system = ECSystem(params)
ec_system.run_ec_system()

# System results
print(ec_system.stats)
| mit |
camptocamp/odoo | openerp/report/render/simple.py | 117 | 3155 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
    """Render an arbitrary XML document as a simple PDF: the root tag as a
    centred title, then one '<b>tag</b>: text' paragraph per element,
    indented by nesting depth (Python 2 / cStringIO code)."""

    def _render(self):
        self.result = StringIO()
        parser = xml.dom.minidom.parseString(self.xml)
        # The root element's tag name doubles as the document title.
        title = parser.documentElement.tagName
        doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
                                author='OpenERP, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
        styles = reportlab.lib.styles.getSampleStyleSheet()
        title_style = copy.deepcopy(styles["Heading1"])
        title_style.alignment = reportlab.lib.enums.TA_CENTER
        story = [ Paragraph(title, title_style) ]
        # One paragraph style per nesting depth, created lazily below.
        style_level = {}
        # Depth-first walk: each entry is (dom_node, depth).
        nodes = [ (parser.documentElement, 0) ]
        while len(nodes):
            node = nodes.pop(0)
            value = ''
            # Iterate children in reverse so that inserting at index 0
            # preserves document order on the stack.
            n = len(node[0].childNodes)-1
            while n >= 0:
                if node[0].childNodes[n].nodeType == 3:
                    # nodeType 3 == TEXT_NODE: accumulate the element text.
                    value += node[0].childNodes[n].nodeValue
                else:
                    nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
                n -= 1
            if not node[1] in style_level:
                # Indent 6mm per level, with a small hanging first line.
                style = copy.deepcopy(styles["Normal"])
                style.leftIndent = node[1]*6*mm
                style.firstLineIndent = -3*mm
                style_level[node[1]] = style
            story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
        doc.build(story)
        return self.result.getvalue()
if __name__=='__main__':
    # Smoke test: render a tiny XML document and dump the PDF bytes to
    # stdout (Python 2 print statement).
    s = simple()
    s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
    if s.render():
        print s.get()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FCP-INDI/nipype | nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py | 5 | 1958 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..longitudinal import RobustTemplate
def test_RobustTemplate_inputs():
    """Check every trait of RobustTemplate's input spec against the
    expected metadata (argstr, mandatory, xor, ...), yielding one
    nose-style assertion per (trait, key) pair.

    NOTE: this file is generated by tools/checkspecs.py — regenerate
    rather than hand-edit when the interface changes.
    """
    input_map = dict(args=dict(argstr='%s',
    ),
    auto_detect_sensitivity=dict(argstr='--satit',
    mandatory=True,
    xor=['outlier_sensitivity'],
    ),
    average_metric=dict(argstr='--average %d',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fixed_timepoint=dict(argstr='--fixtp',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_files=dict(argstr='--mov %s',
    mandatory=True,
    ),
    in_intensity_scales=dict(argstr='--iscalein %s',
    ),
    initial_timepoint=dict(argstr='--inittp %d',
    ),
    initial_transforms=dict(argstr='--ixforms %s',
    ),
    intensity_scaling=dict(argstr='--iscale',
    ),
    no_iteration=dict(argstr='--noit',
    ),
    out_file=dict(argstr='--template %s',
    mandatory=True,
    usedefault=True,
    ),
    outlier_sensitivity=dict(argstr='--sat %.4f',
    mandatory=True,
    xor=['auto_detect_sensitivity'],
    ),
    scaled_intensity_outputs=dict(argstr='--iscaleout %s',
    ),
    subjects_dir=dict(),
    subsample_threshold=dict(argstr='--subsample %d',
    ),
    terminal_output=dict(nohash=True,
    ),
    transform_outputs=dict(argstr='--lta %s',
    ),
    )
    inputs = RobustTemplate.input_spec()
    # One yielded assertion per trait metadata entry.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_RobustTemplate_outputs():
    """Check the auto-generated output-spec traits of RobustTemplate
    (see the note in test_RobustTemplate_inputs: generated file)."""
    output_map = dict(out_file=dict(),
    scaled_intensity_outputs=dict(),
    transform_outputs=dict(),
    )
    outputs = RobustTemplate.output_spec()
    # One yielded assertion per trait metadata entry.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
atyenoria/docker-registry | docker_registry/toolkit.py | 18 | 11250 | # -*- coding: utf-8 -*-
import base64
import functools
import hashlib
import logging
import os
import random
import re
import string
import time
import urllib
import flask
from M2Crypto import RSA
import requests
from docker_registry.core import compat
json = compat.json
from . import storage
from .lib import config
cfg = config.load()
logger = logging.getLogger(__name__)
_re_docker_version = re.compile('docker/([^\s]+)')
_re_authorization = re.compile(r'(\w+)[:=][\s"]?([^",]+)"?')
_re_hex_image_id = re.compile(r'^([a-f0-9]{16}|[a-f0-9]{64})$')
def valid_image_id(f):
    """Decorator: reject requests whose image_id keyword argument is not a
    16- or 64-character lowercase hex string (404 on mismatch)."""
    @functools.wraps(f)
    def checked(*args, **kwargs):
        candidate = kwargs.get('image_id', '')
        # Guard clause: bail out early on malformed ids.
        if not _re_hex_image_id.match(candidate):
            return api_error("Invalid image ID", 404)
        return f(*args, **kwargs)
    return checked
def docker_client_version():
    """Try and extract the client version from the User-Agent string.

    So we can warn older versions of the Docker engine/daemon about
    incompatible APIs. If we can't figure out the version (e.g. the
    client is not a Docker engine), just return None; otherwise return a
    tuple of ints such as (1, 5, 0).
    """
    ua = flask.request.headers.get('user-agent', '')
    m = _re_docker_version.search(ua)
    if not m:
        return
    version = m.group(1)
    # Strip any pre-release/build suffix, e.g. "1.5.0-dev" -> "1.5.0".
    if '-' in version:
        version = version.split('-')[0]
    try:
        # BUG FIX: iterate over the dot-separated components, not over the
        # characters of the string — int('.') always raised ValueError, so
        # this function previously returned None for every dotted version.
        return tuple(int(x) for x in version.split('.'))
    except ValueError:
        # Non-numeric component: treat as unknown version.
        return
class SocketReader(object):
    """File-like wrapper around a raw file object or a requests.Response
    that feeds every chunk read to a list of registered handler callbacks
    (e.g. for checksumming a layer while streaming it)."""

    def __init__(self, fp):
        self._fp = fp
        # Callables invoked with each chunk of data as it is read.
        self.handlers = []

    def __iter__(self):
        return self.iterate()

    def iterate(self, chunk_size=-1):
        """Yield chunks from the underlying source, notifying handlers.

        For requests.Response sources, chunk_size -1 means 1024 bytes;
        for plain file objects, -1 is passed straight to read() (read all).
        """
        if isinstance(self._fp, requests.Response):
            if chunk_size == -1:
                chunk_size = 1024
            for chunk in self._fp.iter_content(chunk_size):
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
        else:
            chunk = self._fp.read(chunk_size)
            while chunk:
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
                chunk = self._fp.read(chunk_size)

    def add_handler(self, handler):
        """Register a callable invoked with every chunk that is read."""
        self.handlers.append(handler)

    def read(self, n=-1):
        """Read up to n bytes, passing the data through the handlers.

        Returns '' at EOF (handlers are not called for empty reads).
        """
        buf = self._fp.read(n)
        if not buf:
            return ''
        for handler in self.handlers:
            handler(buf)
        return buf
def response(data=None, code=200, headers=None, raw=False):
    """Build a Flask response with no-cache headers.

    *data* is JSON-encoded unless raw is True; objects json cannot
    serialise fall back to str(). A bare call returns JSON `true`.
    Extra *headers* override/extend the defaults.
    """
    if data is None:
        data = True
    h = {
        'Cache-Control': 'no-cache',
        'Expires': '-1',
        'Content-Type': 'application/json'
    }
    if headers:
        h.update(headers)
    # Older HTTP/1.0 proxies only honour the Pragma header.
    if h['Cache-Control'] == 'no-cache':
        h['Pragma'] = 'no-cache'
    try:
        if raw is False:
            data = json.dumps(data, sort_keys=True, skipkeys=True)
    except TypeError:
        # Unserialisable object: degrade to its string representation.
        data = str(data)
    return flask.current_app.make_response((data, code, h))
def validate_parent_access(parent_id):
    """Ask the index server whether the current request's token may access
    layer *parent_id*; trivially True in standalone mode."""
    if cfg.standalone:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_parent: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/layer/{3}/access'.format(
        cfg.index_endpoint, full_repos_name[0], full_repos_name[1], parent_id
    )
    # Forward the client's token to the index verbatim.
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    if resp.status_code != 200:
        logger.debug('validate_parent: index returns status {0}'.format(
            resp.status_code
        ))
        return False
    try:
        # Note(dmp): unicode patch XXX not applied! Assuming requests does it
        logger.debug('validate_parent: Content: {0}'.format(resp.text))
        return json.loads(resp.text).get('access', False)
    except ValueError:
        logger.debug('validate_parent: Wrong response format')
        return False
def validate_token(auth):
    """Ask the index whether *auth*'s token grants access to its
    repository; on success, cache the repo's image list in storage."""
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_token: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/images'.format(cfg.index_endpoint,
                                                      full_repos_name[0],
                                                      full_repos_name[1])
    # Forward the client's token to the index verbatim.
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    logger.debug('validate_token: Index returned {0}'.format(resp.status_code))
    if resp.status_code != 200:
        return False
    store = storage.load()
    try:
        # Note(dmp): unicode patch XXX not applied (requests)
        images_list = [i['id'] for i in json.loads(resp.text)]
        # Cache the index's image list for this repository.
        store.put_content(store.images_list_path(*full_repos_name),
                          json.dumps(images_list))
    except ValueError:
        logger.debug('validate_token: Wrong format for images_list')
        return False
    return True
def get_remote_ip():
    """Best-effort client IP: proxy headers first, then the socket peer."""
    # Honour reverse-proxy headers in the same priority order as before.
    for header in ('X-Forwarded-For', 'X-Real-Ip'):
        if header in flask.request.headers:
            return flask.request.headers.getlist(header)[0]
    return flask.request.remote_addr
def is_ssl():
    """True when a reverse-proxy header says the original request used
    HTTPS/SSL."""
    proxy_headers = ('X-Forwarded-Proto', 'X-Forwarded-Protocol')
    request_headers = flask.request.headers
    return any(
        request_headers[name].lower() in ('https', 'ssl')
        for name in proxy_headers
        if name in request_headers
    )
def _parse_auth_header():
    """Parse a 'Token key=value,...' Authorization header into a dict.

    Returns None when the header is missing or its scheme is not 'token'.
    """
    auth = flask.request.headers.get('authorization', '')
    if auth.split(' ')[0].lower() != 'token':
        logger.debug('check_token: Invalid token format')
        return None
    logger.debug('Auth Token = {0}'.format(auth))
    # _re_authorization extracts the key=value pairs from the header.
    auth = dict(_re_authorization.findall(auth))
    logger.debug('auth = {0}'.format(auth))
    return auth
def check_token(args):
    """Validate the request's index token against the requested repository
    and HTTP method; trivially True when token auth is disabled or the
    registry runs standalone."""
    logger.debug('args = {0}'.format(args))
    if cfg.disable_token_auth is True or cfg.standalone is True:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    if 'namespace' in args and 'repository' in args:
        # We're authorizing an action on a repository,
        # let's check that it matches the repos name provided in the token
        full_repos_name = '{namespace}/{repository}'.format(**args)
        logger.debug('full_repos_name = {0}'.format(full_repos_name))
        if full_repos_name != auth.get('repository'):
            logger.debug('check_token: Wrong repository name in the token:'
                         '{0} != {1}'.format(full_repos_name,
                                             auth.get('repository')))
            return False
    # Check that the token `access' variable is aligned with the HTTP method
    access = auth.get('access')
    if access == 'write' and flask.request.method not in ['POST', 'PUT']:
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'read' and flask.request.method != 'GET':
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'delete' and flask.request.method != 'DELETE':
        logger.debug('check_token: Wrong access value in the token')
        return False
    # Finally confirm the token with the index server.
    if validate_token(auth) is False:
        return False
    # Token is valid
    return True
def check_signature():
    """Verify the request's X-Signature header against the configured
    privileged RSA public key.

    The signed message is 'METHOD,PATH,X-Docker-Hdr:value,...' with the
    X-Docker-* headers sorted by name; the signature payload is the
    base64-encoded RSA/SHA-1 signature from the header's `data` field.

    NOTE(review): headers.iterkeys() is Python 2 only.
    """
    pkey = cfg.privileged_key
    if not pkey:
        return False
    headers = flask.request.headers
    signature = headers.get('X-Signature')
    if not signature:
        logger.debug('No X-Signature header in request')
        return False
    sig = parse_content_signature(signature)
    logger.debug('Parsed signature: {}'.format(sig))
    sigdata = base64.b64decode(sig['data'])
    header_keys = sorted([
        x for x in headers.iterkeys() if x.startswith('X-Docker')
    ])
    message = ','.join([flask.request.method, flask.request.path] +
                       ['{}:{}'.format(k, headers[k]) for k in header_keys])
    logger.debug('Signed message: {}'.format(message))
    try:
        # M2Crypto's verify() returns 1 on success.
        return pkey.verify(message_digest(message), sigdata, 'sha1') == 1
    except RSA.RSAError as e:
        logger.exception(e)
        return False
def parse_content_signature(s):
    """Parse a 'key=value;key=value' signature header into a dict.

    Values may themselves contain '=' (only the first one splits);
    later duplicate keys override earlier ones.
    """
    return dict(part.strip().split('=', 1) for part in s.split(';'))
def message_digest(s):
    """Return the raw (binary) SHA-1 digest of *s*."""
    # hashlib.sha1(...) is equivalent to hashlib.new('sha1') + update().
    return hashlib.sha1(s).digest()
def requires_auth(f):
    """Decorator: allow the request through if it carries a valid
    privileged RSA signature or a valid index token; otherwise respond
    401 with a WWW-Authenticate challenge."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if check_signature() is True or check_token(kwargs) is True:
            return f(*args, **kwargs)
        headers = {'WWW-Authenticate': 'Token'}
        return api_error('Requires authorization', 401, headers)
    return wrapper
def api_error(message, code=400, headers=None):
    """Return a JSON error response of the form {'error': message}."""
    logger.debug('api_error: {0}'.format(message))
    return response({'error': message}, code, headers)
def gen_random_string(length=16):
    """Return a random string of uppercase letters and digits.

    NOTE(review): this uses the non-cryptographic `random` module; if
    these strings ever guard anything security-sensitive, switch to the
    `secrets` module.
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def parse_repository_name(f):
    """Decorator: split a '<namespace>/<repo>' path component, defaulting
    the namespace to 'library', and pass both pieces as keyword args.

    NOTE(review): urllib.quote_plus is the Python 2 location (it moved to
    urllib.parse in Python 3).
    """
    @functools.wraps(f)
    def wrapper(repository, *args, **kwargs):
        # At most one split: repo names may not contain further slashes here.
        parts = repository.rstrip('/').split('/', 1)
        if len(parts) < 2:
            namespace = 'library'
            repository = parts[0]
        else:
            (namespace, repository) = parts
        repository = urllib.quote_plus(repository)
        return f(namespace=namespace, repository=repository, *args, **kwargs)
    return wrapper
def exclusive_lock(f):
    """Decorator: best-effort cross-process mutex via a lock file in the
    current working directory.

    If the lock already exists, wait up to ~100 seconds; when the holder
    finishes, the wrapped call is *skipped* (returns None) on the
    assumption the other process already did the work. Raises after the
    timeout.

    NOTE(review): f.func_name is Python 2 only, and lock creation is a
    non-atomic check-then-create race — os.open with O_CREAT|O_EXCL
    would close it. Confirm before relying on this for correctness.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        lock_path = os.path.join(
            './', 'registry.{0}.lock'.format(f.func_name)
        )
        if os.path.exists(lock_path):
            x = 0
            while os.path.exists(lock_path) and x < 100:
                logger.warn('Another process is creating the search database')
                x += 1
                time.sleep(1)
            if x == 100:
                raise Exception('Timedout waiting for db init')
            # The other process finished the job while we waited: skip.
            return
        # Create the (empty) lock file.
        lock_file = open(lock_path, 'w')
        lock_file.close()
        try:
            result = f(*args, **kwargs)
        finally:
            # Always release the lock, even if f() raised.
            os.remove(lock_path)
        return result
    return wrapper
def get_repository():
    """Extract (namespace, repository) from the request's token header.

    Returns None when no Authorization header is present, ('', '') when
    the token has no repository field, and defaults the namespace to
    'library' for single-component repository names.
    """
    auth = flask.request.headers.get('authorization', '')
    if not auth:
        return
    auth = dict(_re_authorization.findall(auth))
    repository = auth.get('repository')
    if repository is None:
        return ('', '')
    parts = repository.rstrip('/').split('/', 1)
    if len(parts) < 2:
        return ('library', parts[0])
    return (parts[0], parts[1])
def get_endpoints(overcfg=None):
    """Return the configured registry endpoints (from *overcfg* or the
    module config), falling back to the request's Host header."""
    registry_endpoints = (overcfg or cfg).registry_endpoints
    if not registry_endpoints:
        # registry_endpoints = socket.gethostname()
        registry_endpoints = flask.request.environ['HTTP_HOST']
    return registry_endpoints
| apache-2.0 |
emsrc/daeso-dutch | test/graph/test_alpinograph.py | 1 | 3993 | """
test AlpinoGraph class
"""
import unittest
from daeso_nl.graph.alpinograph import AlpinoGraph
class Test_AlpinoGraph(unittest.TestCase):
    """Exercise AlpinoGraph's node/edge predicates on a hand-built parse
    of the Dutch sentence 'Ik wilde weten of hij echt begrip had .'."""

    def setUp(self):
        # Build the dependency graph: node "0" is the TOP node, interior
        # nodes carry a `cat`, leaves carry `pos`/`root`, and node "5" is
        # a bare index node co-indexed (index="1") with node "2".
        self.ag = AlpinoGraph(root="0")
        self.ag.add_node("0", "top", cat="top",
                         tokens="Ik wilde weten of hij echt begrip had .".split())
        self.ag.add_node("1", "smain", cat="smain",
                         tokens="Ik wilde weten of hij echt begrip had".split())
        self.ag.add_node("2", "pron", pos="pron", root="ik", index="1",
                         tokens="Ik".split())
        self.ag.add_node("3", "verb", pos="verb", root="willen",
                         tokens="wilde".split())
        self.ag.add_node("4", "inf", cat="inf",
                         tokens="weten of hij echt begrip had".split())
        self.ag.add_node("5", "index", index="1")
        self.ag.add_node("6", "verb", pos="verb", root="weten",
                         tokens="weten".split())
        self.ag.add_node("7", "cp", cat="cp",
                         tokens="of hij echt begrip had".split())
        self.ag.add_node("8", "comp", pos="comp", root="of",
                         tokens="of".split())
        self.ag.add_node("9", "ssub", cat="ssub",
                         tokens="hij echt begrip had".split())
        self.ag.add_node("10", "pron", pos="pron", root="hij",
                         tokens="hij".split())
        self.ag.add_node("11", "np", cat="np",
                         tokens="echt begrip".split())
        self.ag.add_node("12", "adj", pos="adj", root="echt",
                         tokens="echt".split())
        self.ag.add_node("13", "noun", pos="noun", root="begrip",
                         tokens="begrip".split())
        self.ag.add_node("14", "verb", pos="verb", root="hebben",
                         tokens="had".split())
        self.ag.add_node("15", "punt", pos="punct", root=".",
                         tokens=".".split())
        # Edges are labelled with Alpino dependency relations.
        self.ag.add_edge("0", "1", "--")
        self.ag.add_edge("1", "2", "su")
        self.ag.add_edge("1", "3", "hd")
        self.ag.add_edge("1", "4", "vc")
        self.ag.add_edge("4", "5", "su")
        self.ag.add_edge("4", "6", "hd")
        self.ag.add_edge("4", "7", "vc")
        self.ag.add_edge("7", "8", "cmp")
        self.ag.add_edge("7", "9", "body")
        self.ag.add_edge("9", "10", "su")
        self.ag.add_edge("9", "11", "obj1")
        self.ag.add_edge("11", "12", "mod")
        self.ag.add_edge("11", "13", "hd")
        self.ag.add_edge("9", "14", "hd")
        self.ag.add_edge("0", "15", "punct")

    def test_print_subtree(self):
        # Smoke test only: render the graph (Python 2 print statement).
        print "\n", self.ag

    def test_node_is_nominal(self):
        self.assertTrue(self.ag.node_is_nominal("13"))
        self.assertFalse(self.ag.node_is_nominal("3"))

    def test_node_is_punct(self):
        self.assertTrue(self.ag.node_is_punct("15"))
        self.assertFalse(self.ag.node_is_punct("14"))

    def test_node_is_index(self):
        # Only the bare index node counts, not its co-indexed antecedent.
        self.assertTrue(self.ag.node_is_index("5"))
        self.assertFalse(self.ag.node_is_index("1"))
        self.assertFalse(self.ag.node_is_index("2"))

    def test_get_root_node(self):
        self.assertEqual(self.ag.root, "0")

    def test_get_parent_node(self):
        # The root has no parent.
        self.assertEqual(self.ag.get_parent_node("0"), None)
        self.assertEqual(self.ag.get_parent_node("1"), "0")

    def test_get_node_deprel(self):
        self.assertEqual(self.ag.get_node_deprel("0"), None)
        self.assertEqual(self.ag.get_node_deprel("15"), "punct")

    def test_node_is_complement(self):
        self.assertTrue(self.ag.node_is_complement("11"))
        self.assertFalse(self.ag.node_is_complement("12"))
if __name__ == '__main__':
    import sys
    # Force verbose unittest output.
    sys.argv.append("-v")
    unittest.main()
karrtikr/ete | examples/treeview/random_draw.py | 6 | 3818 | import os
from ete3 import Tree, faces
from ete3.treeview.main import TreeStyle, NodeStyle, random_color
import colorsys
import random
# ::::::::::::::
# Layout actions
# ::::::::::::::
def sphere_map(node):
    # Creates a random color sphere face that will be floating over nodes
    bubble = faces.CircleFace(random.randint(5, 40), random_color(), "sphere")
    # Slightly transparent so the tree below stays visible.
    bubble.opacity = 0.7
    faces.add_face_to_node(bubble, node, 0, position="float")
def random_background(node):
    # Set a random background color for each node partition
    node.img_style["bgcolor"] = random_color()
def leaf_name(node):
    # Show the node's name (with a thin border) next to each leaf.
    if node.is_leaf():
        nameF = faces.AttrFace("name")
        nameF.border.width = 1
        faces.add_face_to_node(nameF, node, 0, position="branch-right")
def aligned_faces(node):
    """Attach three randomly styled text faces per leaf in the aligned
    columns (Python 2 code: uses xrange)."""
    if node.is_leaf():
        for i in xrange(3):
            # Random-length label from a fixed alphabet.
            F = faces.TextFace("ABCDEFGHIJK"[0:random.randint(1, 11)])
            F.border.width = 1
            F.border.line_style = 1
            F.inner_background.color = "lightgreen"
            F.border.width = 1
            F.inner_border.width = 1
            F.background.color = "darkgreen"
            # NOTE(review): border.width is assigned three times; only
            # this last value (2) takes effect.
            F.border.width = 2
            F.vt_align = random.randint(0, 4)
            F.hz_align = random.randint(0, 4)
            F.margin_bottom = random.randint(1, 20)
            F.margin_right = random.randint(1, 20)
            F.margin_left = random.randint(1, 20)
            F.margin_top = random.randint(1, 20)
            faces.add_face_to_node(F, node, i, position="aligned")
            # Randomly duplicate the face in the same column.
            if random.randint(0, 1):
                faces.add_face_to_node(F, node, i, position="aligned")
def master_ly(node):
    # Master layout: combine all the per-node layout actions above.
    random_background(node)
    sphere_map(node)
    leaf_name(node)
    aligned_faces(node)
def tiny_ly(node):
    # Minimal node style: 2px square markers (unused by the demo below).
    node.img_style["size"] = 2
    node.img_style["shape"] = "square"
t = Tree()
t.populate(size, reuse_names=False)
I = TreeStyle()
I.mode = "r"
I.orientation = 0
I.layout_fn = master_ly
I.margin_left = 100
I.margin_right = 50
I.margin_top = 100
I.arc_start = 45
I.arc_span = 360
I.margin_bottom = 50
I.show_border = True
I.legend_position = 4
I.title.add_face(faces.TextFace("HOLA MUNDO", fsize=30), 0)
I.draw_aligned_faces_as_table = True
def test(node):
if node.is_leaf():
faces.add_face_to_node(faces.AttrFace("name"), node, 0, position="aligned")
I.aligned_header.add_face( faces.TextFace("H1"), 0 )
I.aligned_header.add_face( faces.TextFace("H1"), 1 )
I.aligned_header.add_face( faces.TextFace("H1"), 2 )
I.aligned_header.add_face( faces.TextFace("H1111111111111"), 3 )
I.aligned_header.add_face( faces.TextFace("H1"), 4 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 0 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 1 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 2 )
I.aligned_foot.add_face( faces.TextFace("F1"), 3 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 4 )
I.legend.add_face(faces.CircleFace(30, random_color(), "sphere"), 0)
I.legend.add_face(faces.CircleFace(30, random_color(), "sphere"), 0)
I.legend.add_face(faces.TextFace("HOLA"), 1)
I.legend.add_face(faces.TextFace("HOLA"), 1)
# Creates a random tree with 10 leaves
t2 = Tree()
t2.populate(10)
# Creates a fixed NodeStyle object containing a TreeFace (A tree image
# as a face within another tree image)
# t.add_face(faces.TreeFace(t2, I), "branch-right", 0)
# Attach the fixed style to the first child of the root node
# t.children[0].img_style = style
I.rotation = 90
I.mode = "c"
t.show(tree_style=I)
#t.render("/home/jhuerta/test.svg", img_properties=I)
#t.render("/home/jhuerta/test.pdf", img_properties=I)
#t.render("/home/jhuerta/test.png", img_properties=I)
#t.render("/home/jhuerta/test.ps", img_properties=I)
#os.system("inkscape /home/jhuerta/test.svg")
#I.mode = "c"
#t.show(img_properties=I)
| gpl-3.0 |
vmarkovtsev/django | tests/gis_tests/geoapp/test_feeds.py | 292 | 4194 | from __future__ import unicode_literals
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from .models import City
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoFeedTest(TestCase):
    """Validate the GeoRSS / W3C-Geo syndication feeds exposed by the
    geoapp test urls: namespace declarations, per-item geo elements, and
    rejection of geometries W3C Geo cannot express."""

    fixtures = ['initial']

    def setUp(self):
        # The syndication framework needs a Site matching SITE_ID.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()

    def assertChildNodes(self, elem, expected):
        "Taken from syndication/tests.py."
        actual = set(n.nodeName for n in elem.childNodes)
        expected = set(expected)
        self.assertEqual(actual, expected)

    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild

        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
                              ['title', 'link', 'description', 'language',
                               'lastBuildDate', 'item', 'georss:box', 'atom:link']
                              )

        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            chan = feed.getElementsByTagName('channel')[0]
            items = chan.getElementsByTagName('item')
            self.assertEqual(len(items), City.objects.count())

            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])

    def test_geofeed_atom(self):
        "Testing geographic feeds using GeoRSS over Atom."
        doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild

        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])

        for feed in [feed1, feed2]:
            # Ensuring the georsss namespace was added to the <feed> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            entries = feed.getElementsByTagName('entry')
            self.assertEqual(len(entries), City.objects.count())

            # Ensuring the georss element was added to each entry in the feed.
            for entry in entries:
                self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])

    def test_geofeed_w3c(self):
        "Testing geographic feeds using W3C Geo."
        doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
        feed = doc.firstChild
        # Ensuring the geo namespace was added to the <feed> element.
        self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())

        # Ensuring the geo:lat and geo:lon element was added to each item in the feed.
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])

        # Boxes and Polygons aren't allowed in W3C Geo feeds.
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/')  # Box in <channel>
        self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/')  # Polygons in <entry>
| bsd-3-clause |
eestay/edx-platform | cms/djangoapps/contentstore/views/tests/test_unit_page.py | 222 | 2775 | """
Unit tests for the unit page.
"""
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.x_module import STUDENT_VIEW
class UnitPageTestCase(StudioPageTestCase):
    """
    Unit tests for the unit page.
    """

    def setUp(self):
        super(UnitPageTestCase, self).setUp()
        # A unit (vertical) with a single video child, still in draft.
        self.vertical = ItemFactory.create(parent_location=self.sequential.location,
                                           category='vertical', display_name='Unit')
        self.video = ItemFactory.create(parent_location=self.vertical.location,
                                        category="video", display_name="My Video")
        self.store = modulestore()

    def test_public_component_preview_html(self):
        """
        Verify that a public xblock's preview returns the expected HTML.
        """
        # NOTE(review): the published block returned here is unused; the
        # validation below runs against self.video — confirm intended.
        published_video = self.store.publish(self.video.location, self.user.id)
        self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)

    def test_draft_component_preview_html(self):
        """
        Verify that a draft xblock's preview returns the expected HTML.
        """
        self.validate_preview_html(self.video, STUDENT_VIEW, can_add=False)

    def test_public_child_container_preview_html(self):
        """
        Verify that a public child container rendering on the unit page (which shows a View arrow
        to the container page) returns the expected HTML.
        """
        child_container = ItemFactory.create(parent_location=self.vertical.location,
                                             category='split_test', display_name='Split Test')
        ItemFactory.create(parent_location=child_container.location,
                           category='html', display_name='grandchild')
        published_child_container = self.store.publish(child_container.location, self.user.id)
        self.validate_preview_html(published_child_container, STUDENT_VIEW, can_add=False)

    def test_draft_child_container_preview_html(self):
        """
        Verify that a draft child container rendering on the unit page (which shows a View arrow
        to the container page) returns the expected HTML.
        """
        child_container = ItemFactory.create(parent_location=self.vertical.location,
                                             category='split_test', display_name='Split Test')
        ItemFactory.create(parent_location=child_container.location,
                           category='html', display_name='grandchild')
        # Re-fetch to get the draft branch of the container.
        draft_child_container = self.store.get_item(child_container.location)
        self.validate_preview_html(draft_child_container, STUDENT_VIEW, can_add=False)
| agpl-3.0 |
jlspyaozhongkai/Uter | third_party_build/Python-2.7.9/lib/python2.7/idlelib/StackViewer.py | 25 | 4404 | import os
import sys
import linecache
import re
import Tkinter as tk
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.ObjectBrowser import ObjectTreeItem, make_objecttreeitem
from idlelib.PyShell import PyShellFileList
def StackBrowser(root, flist=None, tb=None, top=None):
    """Open a window containing a browsable tree view of a traceback's stack."""
    if top is None:
        # No host window supplied: create a fresh toplevel over `root`.
        from Tkinter import Toplevel
        top = Toplevel(root)
    canvas_holder = ScrolledCanvas(top, bg="white", highlightthickness=0)
    canvas_holder.frame.pack(expand=1, fill="both")
    root_item = StackTreeItem(flist, tb)
    tree_root = TreeNode(canvas_holder.canvas, None, root_item)
    tree_root.expand()
class StackTreeItem(TreeItem):
    """Root tree item: the exception header, with one child per stack frame."""
    def __init__(self, flist=None, tb=None):
        self.flist = flist
        self.stack = self.get_stack(tb)
        self.text = self.get_exception()
    def get_stack(self, tb):
        """Return a list of (frame, lineno) pairs for the given traceback.

        Falls back to sys.last_traceback when tb is None.
        """
        if tb is None:
            tb = sys.last_traceback
        # Skip a leading traceback entry that carries no frame.
        if tb and tb.tb_frame is None:
            tb = tb.tb_next
        frames = []
        while tb is not None:
            frames.append((tb.tb_frame, tb.tb_lineno))
            tb = tb.tb_next
        return frames
    def get_exception(self):
        """Format 'ExcName: value' from sys.last_type / sys.last_value."""
        exc_type = sys.last_type
        exc_value = sys.last_value
        if hasattr(exc_type, "__name__"):
            exc_type = exc_type.__name__
        text = str(exc_type)
        if exc_value is not None:
            text = text + ": " + str(exc_value)
        return text
    def GetText(self):
        return self.text
    def GetSubList(self):
        # One FrameTreeItem per recorded stack frame.
        return [FrameTreeItem(info, self.flist) for info in self.stack]
class FrameTreeItem(TreeItem):
    """Tree item for one stack frame: a summary label with <locals>/<globals> children."""
    def __init__(self, info, flist):
        # info is a (frame, lineno) pair as produced by StackTreeItem.get_stack.
        self.info = info
        self.flist = flist
    def GetText(self):
        """Return a one-line summary: module[.function], line number and source text."""
        frame, lineno = self.info
        try:
            modname = frame.f_globals["__name__"]
        except Exception:
            # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed while fetching the module name.
            modname = "?"
        code = frame.f_code
        filename = code.co_filename
        funcname = code.co_name
        sourceline = linecache.getline(filename, lineno)
        sourceline = sourceline.strip()
        if funcname in ("?", "", None):
            item = "%s, line %d: %s" % (modname, lineno, sourceline)
        else:
            item = "%s.%s(...), line %d: %s" % (modname, funcname,
                                                lineno, sourceline)
        return item
    def GetSubList(self):
        """Return variable-namespace children; <locals> only when distinct from globals."""
        frame, lineno = self.info
        sublist = []
        if frame.f_globals is not frame.f_locals:
            item = VariablesTreeItem("<locals>", frame.f_locals, self.flist)
            sublist.append(item)
        item = VariablesTreeItem("<globals>", frame.f_globals, self.flist)
        sublist.append(item)
        return sublist
    def OnDoubleClick(self):
        """Jump the editor to the frame's file/line, if a file list is available."""
        if self.flist:
            frame, lineno = self.info
            filename = frame.f_code.co_filename
            if os.path.isfile(filename):
                self.flist.gotofileline(filename, lineno)
class VariablesTreeItem(ObjectTreeItem):
    """Tree item listing the name/value pairs of a namespace dict."""
    def GetText(self):
        return self.labeltext
    def GetLabelText(self):
        return None
    def IsExpandable(self):
        return len(self.object) > 0
    def keys(self):
        return self.object.keys()
    def GetSubList(self):
        """Return one editable child item per surviving key in the namespace."""
        items = []
        for name in self.keys():
            try:
                value = self.object[name]
            except KeyError:
                # Key vanished between keys() and the lookup; skip it.
                continue
            # Default-argument binding captures the current key and dict,
            # so each child writes back to its own slot.
            def setfunction(value, key=name, object=self.object):
                object[key] = value
            items.append(make_objecttreeitem(name + " =", value, setfunction))
        return items
def _stack_viewer(parent):
    # htest helper: open a StackBrowser primed with a synthetic NameError.
    root = tk.Tk()
    root.title("Test StackViewer")
    # Position the test window just below the htest parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    flist = PyShellFileList(root)
    try: # to obtain a traceback object
        intentional_name_error
    except NameError:
        exc_type, exc_value, exc_tb = sys.exc_info()
    # inject stack trace to sys
    # (StackTreeItem reads sys.last_type/last_value/last_traceback)
    sys.last_type = exc_type
    sys.last_value = exc_value
    sys.last_traceback = exc_tb
    StackBrowser(root, flist=flist, top=root, tb=exc_tb)
    # restore sys to original state
    del sys.last_type
    del sys.last_value
    del sys.last_traceback
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_stack_viewer)
| gpl-3.0 |
mmckinst/pykickstart | pykickstart/commands/sshkey.py | 3 | 3231 | #
# Brian C. Lane <bcl@redhat.com>
#
# Copyright 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartValueError, formatErrorMsg
from pykickstart.options import KSOptionParser
import warnings
from pykickstart.i18n import _
class F22_SshKeyData(BaseData):
    """Data object for one parsed sshkey command (target user + key text)."""
    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs
    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.username = kwargs.get("username", None)
        self.key = kwargs.get("key", "")
    def __eq__(self, y):
        # Two entries are considered equal when they target the same user.
        return bool(y) and self.username == y.username
    def __ne__(self, y):
        return not self == y
    def __str__(self):
        """Render the entry back to kickstart syntax."""
        return BaseData.__str__(self) + "sshkey" + self._getArgsAsStr() + '\n'
    def _getArgsAsStr(self):
        return " --username=%s" % self.username + ' "%s"' % self.key
class F22_SshKey(KickstartCommand):
    """Handler for the sshkey kickstart command (Fedora 22+)."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs
    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        self.sshUserList = kwargs.get("sshUserList", [])
    def __str__(self):
        # Concatenate the rendering of every stored sshkey entry.
        return "".join(str(user) for user in self.sshUserList)
    def _getParser(self):
        parser = KSOptionParser()
        parser.add_option("--username", dest="username", required=True)
        return parser
    def parse(self, args):
        """Parse one sshkey line into an SshKeyData object and record it."""
        data = self.handler.SshKeyData()
        (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
        self._setToObj(self.op, opts, data)
        data.lineno = self.lineno
        # Exactly one positional argument (the key text) is required.
        if len(extra) != 1:
            raise KickstartValueError(formatErrorMsg(self.lineno, msg=_("A single argument is expected for the %s command") % "sshkey"))
        data.key = extra[0]
        if data in self.dataList():
            warnings.warn(_("An ssh user with the name %s has already been defined.") % data.username)
        return data
    def dataList(self):
        return self.sshUserList
| gpl-2.0 |
kasioumis/invenio | invenio/version.py | 8 | 3488 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Version information for Invenio.
This file is imported by ``invenio.__init__``, and parsed by ``setup.py``.
"""
# Respect the following format: major, minor, patch, ..., "dev"?, revision?
#
# - major, minor, patch are numbers starting at zero.
# - you can put as much sub version as you need before 'dev'
# - dev has to be set in development mode (non-release).
# - revision can be set if you want to override the date coming from git.
#
# See the doctest below.
version = (2, 1, 0, 'dev', 20150305)
def build_version(*args):
    """Build a PEP440 compatible version based on a list of arguments.

    Inspired by Django's django.utils.version

    .. doctest::

        >>> print(build_version(1, 0, 0))
        1.0.0
        >>> print(build_version(1, 1, 1))
        1.1.1
        >>> print(build_version(1, 2, 3, 4))
        1.2.3.4
        >>> print(build_version(2, 0, 0, 'dev', 1))
        2.0.0.dev1
        >>> print(build_version(2, 0, 0, 'dev'))  # doctest: +ELLIPSIS
        2.0.0.dev...
        >>> print(build_version(2, 0, 1, 'dev'))  # doctest: +ELLIPSIS
        2.0.1.dev...
        >>> print(build_version(1, 2, 3, 4, 5, 6, 'dev'))  # doctest: +ELLIPSIS
        1.2.3.4.5.6.dev...
    """
    # Everything before the optional 'dev' marker is a numeric component.
    if 'dev' in args:
        pos = args.index('dev')
    else:
        pos = len(args)
    numeric = args[:pos]
    # Index just past the last non-zero numeric component (0 when all zero).
    # This replaces the previous reduce()-based scan: `reduce` is only a
    # builtin on Python 2, so the module failed to import on Python 3.
    last_nonzero = 0
    for index in range(len(numeric) - 1, -1, -1):
        if numeric[index] != 0:
            last_nonzero = index + 1
            break
    # Always keep at least major.minor.patch; drop further trailing zeros.
    parts = max(3, last_nonzero)
    # Renamed from `version`, which shadowed the module-level version tuple.
    result = '.'.join(str(arg) for arg in args[:parts])
    if len(args) > pos:
        # 'dev' present: append .devN where N is the explicit revision,
        # or the timestamp of the latest git commit when omitted.
        revision = args[pos + 1] if len(args) > pos + 1 else git_revision()
        result += '.dev{0}'.format(revision)
    return result
def git_revision():
    """Get the timestamp of the latest git revision."""
    # Result is memoized on the function object so the subprocess runs once
    # per process, no matter how often the version string is rebuilt.
    if not hasattr(git_revision, '_cache'):
        import datetime
        import subprocess
        # NOTE(review): assumes the current working directory is inside the
        # git checkout -- the command is not anchored to this file's path.
        call = subprocess.Popen(r'git log -1 --pretty=format:%ct --quiet HEAD',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=True)
        stdout, _ = call.communicate()
        try:
            # %ct is the committer date as a unix timestamp; format it as
            # a sortable YYYYMMDDHHMMSS string in UTC.
            timestamp = int(stdout.decode())
            ts = datetime.datetime.utcfromtimestamp(timestamp)
            revision = ts.strftime('%Y%m%d%H%M%S')
        except ValueError:
            # Not a git checkout (or git missing): fall back to revision '0'.
            revision = '0'
        git_revision._cache = revision
    return git_revision._cache
__version__ = build_version(*version)
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_content_type.py | 1 | 2199 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.ai.formrecognizer._helpers import get_content_type
from testcase import FormRecognizerTest
class TestContentType(FormRecognizerTest):
    """Tests for get_content_type(): MIME-type detection of a document passed
    either as an open binary stream or as raw bytes.

    The per-format tests were copy/paste duplicates; the open/read/assert
    boilerplate is factored into two private helpers.
    """

    def _assert_stream_type(self, path, expected):
        # Detection when given an open binary file object.
        with open(path, "rb") as fd:
            self.assertEqual(get_content_type(fd), expected)

    def _assert_bytes_type(self, path, expected):
        # Detection when given the raw byte content of the file.
        with open(path, "rb") as fd:
            data = fd.read()
        self.assertEqual(get_content_type(data), expected)

    def test_pdf(self):
        self._assert_stream_type(self.invoice_pdf, "application/pdf")

    def test_pdf_bytes(self):
        self._assert_bytes_type(self.invoice_pdf, "application/pdf")

    def test_jpg(self):
        self._assert_stream_type(self.form_jpg, "image/jpeg")

    def test_jpg_bytes(self):
        self._assert_bytes_type(self.form_jpg, "image/jpeg")

    def test_png(self):
        self._assert_stream_type(self.receipt_png, "image/png")

    def test_png_bytes(self):
        self._assert_bytes_type(self.receipt_png, "image/png")

    def test_tiff_little_endian(self):
        self._assert_stream_type(self.invoice_tiff, "image/tiff")

    def test_tiff_little_endian_bytes(self):
        self._assert_bytes_type(self.invoice_tiff, "image/tiff")

    def test_tiff_big_endian(self):
        # Big-endian TIFF magic number ("MM" byte-order mark).
        self.assertEqual(get_content_type(b"\x4D\x4D\x00\x2A"), "image/tiff")

    def test_bmp(self):
        # BMP magic number ("BM" header).
        self.assertEqual(get_content_type(b"\x42\x4D\x00\x00"), "image/bmp")
| mit |
whix/tablib | tablib/packages/xlwt/Workbook.py | 57 | 20514 | # -*- coding: windows-1252 -*-
'''
Record Order in BIFF8
Workbook Globals Substream
BOF Type = workbook globals
Interface Header
MMS
Interface End
WRITEACCESS
CODEPAGE
DSF
TABID
FNGROUPCOUNT
Workbook Protection Block
WINDOWPROTECT
PROTECT
PASSWORD
PROT4REV
PROT4REVPASS
BACKUP
HIDEOBJ
WINDOW1
DATEMODE
PRECISION
REFRESHALL
BOOKBOOL
FONT +
FORMAT *
XF +
STYLE +
? PALETTE
USESELFS
BOUNDSHEET +
COUNTRY
? Link Table
SST
ExtSST
EOF
'''
import BIFFRecords
import Style
class Workbook(object):
    """In-memory representation of a BIFF8 (.xls) workbook.

    Collects worksheets, styles and shared strings, serialises them into a
    BIFF byte stream (``get_biff_data``) and writes that stream into an OLE2
    compound document (``save``).  The record order produced here must match
    the layout documented at the top of this module.

    NOTE(review): this module targets Python 2 (``unicode``, ``0x00L``).
    """
    #################################################################
    ## Constructor
    #################################################################
    def __init__(self, encoding='ascii', style_compression=0):
        # Codec used to decode 8-bit strings (sheet names, etc.) to unicode.
        self.encoding = encoding
        self.__owner = 'None'
        self.__country_code = None # 0x07 is Russia :-)
        self.__wnd_protect = 0
        self.__obj_protect = 0
        self.__protect = 0
        self.__backup_on_save = 0
        # for WINDOW1 record
        self.__hpos_twips = 0x01E0
        self.__vpos_twips = 0x005A
        self.__width_twips = 0x3FCF
        self.__height_twips = 0x2A4E
        self.__active_sheet = 0
        self.__first_tab_index = 0
        self.__selected_tabs = 0x01
        self.__tab_width_twips = 0x0258
        self.__wnd_hidden = 0
        self.__wnd_mini = 0
        self.__hscroll_visible = 1
        self.__vscroll_visible = 1
        self.__tabs_visible = 1
        self.__styles = Style.StyleCollection(style_compression)
        self.__dates_1904 = 0
        self.__use_cell_values = 1
        self.__sst = BIFFRecords.SharedStringTable(self.encoding)
        self.__worksheets = []
        # Maps lowercased sheet name -> index into self.__worksheets.
        self.__worksheet_idx_from_name = {}
        # Maps (supbook_idx, first_sheet, last_sheet) -> EXTERNSHEET ref index.
        self.__sheet_refs = {}
        self._supbook_xref = {}
        self._xcall_xref = {}
        self._ownbook_supbookx = None
        self._ownbook_supbook_ref = None
        self._xcall_supbookx = None
        self._xcall_supbook_ref = None
    #################################################################
    ## Properties, "getters", "setters"
    #################################################################
    def get_style_stats(self):
        # Copy so callers cannot mutate the collection's internal counters.
        return self.__styles.stats[:]
    def set_owner(self, value):
        self.__owner = value
    def get_owner(self):
        return self.__owner
    owner = property(get_owner, set_owner)
    #################################################################
    def set_country_code(self, value):
        self.__country_code = value
    def get_country_code(self):
        return self.__country_code
    country_code = property(get_country_code, set_country_code)
    #################################################################
    def set_wnd_protect(self, value):
        self.__wnd_protect = int(value)
    def get_wnd_protect(self):
        return bool(self.__wnd_protect)
    wnd_protect = property(get_wnd_protect, set_wnd_protect)
    #################################################################
    def set_obj_protect(self, value):
        self.__obj_protect = int(value)
    def get_obj_protect(self):
        return bool(self.__obj_protect)
    obj_protect = property(get_obj_protect, set_obj_protect)
    #################################################################
    def set_protect(self, value):
        self.__protect = int(value)
    def get_protect(self):
        return bool(self.__protect)
    protect = property(get_protect, set_protect)
    #################################################################
    def set_backup_on_save(self, value):
        self.__backup_on_save = int(value)
    def get_backup_on_save(self):
        return bool(self.__backup_on_save)
    backup_on_save = property(get_backup_on_save, set_backup_on_save)
    #################################################################
    # Window geometry values are stored clamped to 16 bits (BIFF field size).
    def set_hpos(self, value):
        self.__hpos_twips = value & 0xFFFF
    def get_hpos(self):
        return self.__hpos_twips
    hpos = property(get_hpos, set_hpos)
    #################################################################
    def set_vpos(self, value):
        self.__vpos_twips = value & 0xFFFF
    def get_vpos(self):
        return self.__vpos_twips
    vpos = property(get_vpos, set_vpos)
    #################################################################
    def set_width(self, value):
        self.__width_twips = value & 0xFFFF
    def get_width(self):
        return self.__width_twips
    width = property(get_width, set_width)
    #################################################################
    def set_height(self, value):
        self.__height_twips = value & 0xFFFF
    def get_height(self):
        return self.__height_twips
    height = property(get_height, set_height)
    #################################################################
    def set_active_sheet(self, value):
        # The first visible tab follows the active sheet.
        self.__active_sheet = value & 0xFFFF
        self.__first_tab_index = self.__active_sheet
    def get_active_sheet(self):
        return self.__active_sheet
    active_sheet = property(get_active_sheet, set_active_sheet)
    #################################################################
    def set_tab_width(self, value):
        self.__tab_width_twips = value & 0xFFFF
    def get_tab_width(self):
        return self.__tab_width_twips
    tab_width = property(get_tab_width, set_tab_width)
    #################################################################
    # Stored inverted: the BIFF flag is "hidden", the API exposes "visible".
    def set_wnd_visible(self, value):
        self.__wnd_hidden = int(not value)
    def get_wnd_visible(self):
        return not bool(self.__wnd_hidden)
    wnd_visible = property(get_wnd_visible, set_wnd_visible)
    #################################################################
    def set_wnd_mini(self, value):
        self.__wnd_mini = int(value)
    def get_wnd_mini(self):
        return bool(self.__wnd_mini)
    wnd_mini = property(get_wnd_mini, set_wnd_mini)
    #################################################################
    def set_hscroll_visible(self, value):
        self.__hscroll_visible = int(value)
    def get_hscroll_visible(self):
        return bool(self.__hscroll_visible)
    hscroll_visible = property(get_hscroll_visible, set_hscroll_visible)
    #################################################################
    def set_vscroll_visible(self, value):
        self.__vscroll_visible = int(value)
    def get_vscroll_visible(self):
        return bool(self.__vscroll_visible)
    vscroll_visible = property(get_vscroll_visible, set_vscroll_visible)
    #################################################################
    def set_tabs_visible(self, value):
        self.__tabs_visible = int(value)
    def get_tabs_visible(self):
        return bool(self.__tabs_visible)
    tabs_visible = property(get_tabs_visible, set_tabs_visible)
    #################################################################
    def set_dates_1904(self, value):
        self.__dates_1904 = int(value)
    def get_dates_1904(self):
        return bool(self.__dates_1904)
    dates_1904 = property(get_dates_1904, set_dates_1904)
    #################################################################
    def set_use_cell_values(self, value):
        self.__use_cell_values = int(value)
    def get_use_cell_values(self):
        return bool(self.__use_cell_values)
    use_cell_values = property(get_use_cell_values, set_use_cell_values)
    #################################################################
    def get_default_style(self):
        return self.__styles.default_style
    default_style = property(get_default_style)
    ##################################################################
    ## Methods
    ##################################################################
    def add_style(self, style):
        return self.__styles.add(style)
    def add_str(self, s):
        return self.__sst.add_str(s)
    def del_str(self, sst_idx):
        self.__sst.del_str(sst_idx)
    def str_index(self, s):
        return self.__sst.str_index(s)
    def add_sheet(self, sheetname, cell_overwrite_ok=False):
        """Create, register and return a new Worksheet.

        Raises if the name is invalid or duplicates an existing sheet
        (comparison is case-insensitive).
        """
        import Worksheet, Utils
        if not isinstance(sheetname, unicode):
            sheetname = sheetname.decode(self.encoding)
        if not Utils.valid_sheet_name(sheetname):
            raise Exception("invalid worksheet name %r" % sheetname)
        lower_name = sheetname.lower()
        if lower_name in self.__worksheet_idx_from_name:
            raise Exception("duplicate worksheet name %r" % sheetname)
        self.__worksheet_idx_from_name[lower_name] = len(self.__worksheets)
        self.__worksheets.append(Worksheet.Worksheet(sheetname, self, cell_overwrite_ok))
        return self.__worksheets[-1]
    def get_sheet(self, sheetnum):
        return self.__worksheets[sheetnum]
    def raise_bad_sheetname(self, sheetname):
        raise Exception("Formula: unknown sheet name %s" % sheetname)
    def convert_sheetindex(self, strg_ref, n_sheets):
        # Validate a numeric sheet reference from a formula string.
        idx = int(strg_ref)
        if 0 <= idx < n_sheets:
            return idx
        msg = "Formula: sheet index (%s) >= number of sheets (%d)" % (strg_ref, n_sheets)
        raise Exception(msg)
    def _get_supbook_index(self, tag):
        # Assign SUPBOOK indexes on first use, in insertion order.
        if tag in self._supbook_xref:
            return self._supbook_xref[tag]
        self._supbook_xref[tag] = idx = len(self._supbook_xref)
        return idx
    def setup_ownbook(self):
        """Lazily register the internal-references SUPBOOK and its sheet ref."""
        self._ownbook_supbookx = self._get_supbook_index(('ownbook', 0))
        self._ownbook_supbook_ref = None
        reference = (self._ownbook_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._ownbook_supbook_ref = len(self.__sheet_refs)
    def setup_xcall(self):
        """Lazily register the add-in-functions SUPBOOK and its sheet ref."""
        self._xcall_supbookx = self._get_supbook_index(('xcall', 0))
        self._xcall_supbook_ref = None
        reference = (self._xcall_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._xcall_supbook_ref = len(self.__sheet_refs)
    def add_sheet_reference(self, formula):
        """Resolve a formula's sheet / add-in-function references.

        Translates symbolic references into EXTERNSHEET indexes and patches
        the formula's compiled bytes in place via ``patch_references``.
        """
        patches = []
        n_sheets = len(self.__worksheets)
        sheet_refs, xcall_refs = formula.get_references()
        for ref0, ref1, offset in sheet_refs:
            if not ref0.isdigit():
                try:
                    ref0n = self.__worksheet_idx_from_name[ref0.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref0)
            else:
                ref0n = self.convert_sheetindex(ref0, n_sheets)
            if ref1 == ref0:
                ref1n = ref0n
            elif not ref1.isdigit():
                try:
                    ref1n = self.__worksheet_idx_from_name[ref1.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref1)
            else:
                ref1n = self.convert_sheetindex(ref1, n_sheets)
            if ref1n < ref0n:
                msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
                    % (ref0, ref1, ref0n, ref1n)
                raise Exception(msg)
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            reference = (self._ownbook_supbookx, ref0n, ref1n)
            if reference in self.__sheet_refs:
                patches.append((offset, self.__sheet_refs[reference]))
            else:
                nrefs = len(self.__sheet_refs)
                if nrefs > 65535:
                    raise Exception('More than 65536 inter-sheet references')
                self.__sheet_refs[reference] = nrefs
                patches.append((offset, nrefs))
        for funcname, offset in xcall_refs:
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            if self._xcall_supbookx is None:
                self.setup_xcall()
            # print funcname, self._supbook_xref
            patches.append((offset, self._xcall_supbook_ref))
            if not isinstance(funcname, unicode):
                funcname = funcname.decode(self.encoding)
            if funcname in self._xcall_xref:
                idx = self._xcall_xref[funcname]
            else:
                self._xcall_xref[funcname] = idx = len(self._xcall_xref)
            # EXTERNNAME indexes are 1-based, hence idx + 1.
            patches.append((offset + 2, idx + 1))
        formula.patch_references(patches)
    ##################################################################
    ## BIFF records generation
    ##################################################################
    def __bof_rec(self):
        return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.BOOK_GLOBAL).get()
    def __eof_rec(self):
        return BIFFRecords.EOFRecord().get()
    def __intf_hdr_rec(self):
        return BIFFRecords.InteraceHdrRecord().get()
    def __intf_end_rec(self):
        return BIFFRecords.InteraceEndRecord().get()
    def __intf_mms_rec(self):
        return BIFFRecords.MMSRecord().get()
    def __write_access_rec(self):
        return BIFFRecords.WriteAccessRecord(self.__owner).get()
    def __wnd_protect_rec(self):
        return BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
    def __obj_protect_rec(self):
        return BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
    def __protect_rec(self):
        return BIFFRecords.ProtectRecord(self.__protect).get()
    def __password_rec(self):
        return BIFFRecords.PasswordRecord().get()
    def __prot4rev_rec(self):
        return BIFFRecords.Prot4RevRecord().get()
    def __prot4rev_pass_rec(self):
        return BIFFRecords.Prot4RevPassRecord().get()
    def __backup_rec(self):
        return BIFFRecords.BackupRecord(self.__backup_on_save).get()
    def __hide_obj_rec(self):
        return BIFFRecords.HideObjRecord().get()
    def __window1_rec(self):
        # Pack the window option bits in their documented flag positions.
        flags = 0
        flags |= (self.__wnd_hidden) << 0
        flags |= (self.__wnd_mini) << 1
        flags |= (self.__hscroll_visible) << 3
        flags |= (self.__vscroll_visible) << 4
        flags |= (self.__tabs_visible) << 5
        return BIFFRecords.Window1Record(self.__hpos_twips, self.__vpos_twips,
                self.__width_twips, self.__height_twips,
                flags,
                self.__active_sheet, self.__first_tab_index,
                self.__selected_tabs, self.__tab_width_twips).get()
    def __codepage_rec(self):
        return BIFFRecords.CodepageBiff8Record().get()
    def __country_rec(self):
        # Optional record: emitted only when a country code was set.
        if not self.__country_code:
            return ''
        return BIFFRecords.CountryRecord(self.__country_code, self.__country_code).get()
    def __dsf_rec(self):
        return BIFFRecords.DSFRecord().get()
    def __tabid_rec(self):
        return BIFFRecords.TabIDRecord(len(self.__worksheets)).get()
    def __fngroupcount_rec(self):
        return BIFFRecords.FnGroupCountRecord().get()
    def __datemode_rec(self):
        return BIFFRecords.DateModeRecord(self.__dates_1904).get()
    def __precision_rec(self):
        return BIFFRecords.PrecisionRecord(self.__use_cell_values).get()
    def __refresh_all_rec(self):
        return BIFFRecords.RefreshAllRecord().get()
    def __bookbool_rec(self):
        return BIFFRecords.BookBoolRecord().get()
    def __all_fonts_num_formats_xf_styles_rec(self):
        return self.__styles.get_biff_data()
    def __palette_rec(self):
        # PALETTE record not implemented; always empty.
        result = ''
        return result
    def __useselfs_rec(self):
        return BIFFRecords.UseSelfsRecord().get()
    def __boundsheets_rec(self, data_len_before, data_len_after, sheet_biff_lens):
        """Build the BOUNDSHEET records, each holding its sheet's stream offset."""
        # .................................
        # BOUNDSEHEET0
        # BOUNDSEHEET1
        # BOUNDSEHEET2
        # ..................................
        # WORKSHEET0
        # WORKSHEET1
        # WORKSHEET2
        # First pass computes the total BOUNDSHEET size, which is needed to
        # know where the first worksheet substream starts.
        boundsheets_len = 0
        for sheet in self.__worksheets:
            boundsheets_len += len(BIFFRecords.BoundSheetRecord(
                0x00L, sheet.visibility, sheet.name, self.encoding
                ).get())
        start = data_len_before + boundsheets_len + data_len_after
        result = ''
        for sheet_biff_len, sheet in zip(sheet_biff_lens, self.__worksheets):
            result += BIFFRecords.BoundSheetRecord(
                start, sheet.visibility, sheet.name, self.encoding
                ).get()
            start += sheet_biff_len
        return result
    def __all_links_rec(self):
        """Serialise the link table: SUPBOOK records (plus EXTERNNAMEs for
        add-in functions) followed by a single EXTERNSHEET record."""
        pieces = []
        # SUPBOOKs must be written in index order.
        temp = [(idx, tag) for tag, idx in self._supbook_xref.items()]
        temp.sort()
        for idx, tag in temp:
            stype, snum = tag
            if stype == 'ownbook':
                rec = BIFFRecords.InternalReferenceSupBookRecord(len(self.__worksheets)).get()
                pieces.append(rec)
            elif stype == 'xcall':
                rec = BIFFRecords.XcallSupBookRecord().get()
                pieces.append(rec)
                # One EXTERNNAME per referenced add-in function, in index order.
                temp = [(idx, name) for name, idx in self._xcall_xref.items()]
                temp.sort()
                for idx, name in temp:
                    rec = BIFFRecords.ExternnameRecord(
                        options=0, index=0, name=name, fmla='\x02\x00\x1c\x17').get()
                    pieces.append(rec)
            else:
                raise Exception('unknown supbook stype %r' % stype)
        if len(self.__sheet_refs) > 0:
            # get references in index order
            temp = [(idx, ref) for ref, idx in self.__sheet_refs.items()]
            temp.sort()
            temp = [ref for idx, ref in temp]
            externsheet_record = BIFFRecords.ExternSheetRecord(temp).get()
            pieces.append(externsheet_record)
        return ''.join(pieces)
    def __sst_rec(self):
        return self.__sst.get_biff_record()
    def __ext_sst_rec(self, abs_stream_pos):
        # EXTSST is not generated; always empty (see commented-out code).
        return ''
        #return BIFFRecords.ExtSSTRecord(abs_stream_pos, self.sst_record.str_placement,
        #self.sst_record.portions_len).get()
    def get_biff_data(self):
        """Serialise the whole workbook into a single BIFF byte string.

        The record sequence follows the required BIFF8 global-substream
        order; do not reorder these calls.
        """
        before = ''
        before += self.__bof_rec()
        before += self.__intf_hdr_rec()
        before += self.__intf_mms_rec()
        before += self.__intf_end_rec()
        before += self.__write_access_rec()
        before += self.__codepage_rec()
        before += self.__dsf_rec()
        before += self.__tabid_rec()
        before += self.__fngroupcount_rec()
        before += self.__wnd_protect_rec()
        before += self.__protect_rec()
        before += self.__obj_protect_rec()
        before += self.__password_rec()
        before += self.__prot4rev_rec()
        before += self.__prot4rev_pass_rec()
        before += self.__backup_rec()
        before += self.__hide_obj_rec()
        before += self.__window1_rec()
        before += self.__datemode_rec()
        before += self.__precision_rec()
        before += self.__refresh_all_rec()
        before += self.__bookbool_rec()
        before += self.__all_fonts_num_formats_xf_styles_rec()
        before += self.__palette_rec()
        before += self.__useselfs_rec()
        country = self.__country_rec()
        all_links = self.__all_links_rec()
        shared_str_table = self.__sst_rec()
        after = country + all_links + shared_str_table
        ext_sst = self.__ext_sst_rec(0) # need fake cause we need calc stream pos
        eof = self.__eof_rec()
        self.__worksheets[self.__active_sheet].selected = True
        sheets = ''
        sheet_biff_lens = []
        for sheet in self.__worksheets:
            data = sheet.get_biff_data()
            sheets += data
            sheet_biff_lens.append(len(data))
        bundlesheets = self.__boundsheets_rec(len(before), len(after)+len(ext_sst)+len(eof), sheet_biff_lens)
        sst_stream_pos = len(before) + len(bundlesheets) + len(country) + len(all_links)
        ext_sst = self.__ext_sst_rec(sst_stream_pos)
        return before + bundlesheets + after + ext_sst + eof + sheets
    def save(self, filename):
        """Serialise the workbook and write it to *filename* as an OLE2 doc."""
        import CompoundDoc
        doc = CompoundDoc.XlsDoc()
        doc.save(filename, self.get_biff_data())
| mit |
openbaoz/titanium_mobile | support/common/css/ply/lex.py | 37 | 40747 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
    # Python 2.6: the types module still exposes StringType/UnicodeType.
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0: those aliases are gone; text/bytes are the string types.
    StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
    def func_code(f):
        # Python 2: the code object lives on f.func_code.
        return f.func_code
else:
    def func_code(f):
        # Python 3: renamed to f.__code__.
        return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when invalid input is hit and no error rule handles it."""
    def __init__(self, message, s):
        # Only the message participates in str()/args; the remaining
        # unlexed input is kept separately on .text.
        self.args = (message,)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token; carries type, value, lineno and lexpos attributes."""
    def __str__(self):
        return "LexToken({0},{1!r},{2:d},{3:d})".format(
            self.type, self.value, self.lineno, self.lexpos)
    # repr is identical to str for tokens.
    __repr__ = __str__
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Stand-in for a logging.Logger that writes plain lines to a file object."""

    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        # Lazy %-style formatting, mirroring the logging module's convention.
        self.f.write((msg % args) + "\n")

    def warning(self, msg, *args, **kwargs):
        self.f.write("LEX WARNING: " + (msg % args) + "\n")

    def error(self, msg, *args, **kwargs):
        self.f.write("LEX ERROR: " + (msg % args) + "\n")

    # Lower-severity levels share the unprefixed writer.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that silently swallows everything.

    Any attribute access returns the logger itself, and calling it also
    returns the logger, so arbitrary chains like n.error("x").warning("y")
    are all no-ops.
    """

    def __getattribute__(self, attr):
        return self

    def __call__(self, *args, **kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """Runtime lexing engine.

    Instances are normally built by lex() rather than constructed by hand.
    Public interface: input(), token(), clone(), begin()/push_state()/
    pop_state()/current_state(), skip(), plus the lineno and lexpos
    attributes (see the banner comment above).
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode

    def clone(self,object=None):
        """Return a copy of the lexer, optionally re-bound to *object*.

        When *object* is supplied, every rule function and error handler is
        looked up again by name as an attribute of *object*, so the clone
        drives methods of the new object instead of the original module.
        """
        c = copy.copy(self)
        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # Placeholder or string rule: keep as-is
                            newfindex.append(f)
                            continue
                        # Re-bind by name on the new object
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile,outputdir=""):
        """Dump lexer tables to <outputdir>/<tabfile>.py for reuse in optimized mode."""
        if isinstance(tabfile,types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir,basetabfilename)+".py"
        tf = open(filename,"w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion = %s\n" % repr(__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = { }
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)

        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                # Functions are reduced to their names so they can be
                # re-bound by readtab() at load time.
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        """Load tables previously written by writetab(); rule names are
        resolved back to callables through *fdict*."""
        if isinstance(tabfile,types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = { }
                exec("import %s as lextab" % tabfile, env,env)
                lextab = env['lextab']

        # Refuse stale tables written by a different PLY release
        if getattr(lextab,"_tabversion","0.0") != __version__:
            raise ImportError("Inconsistent PLY version")

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        """Store *s* as the text to tokenize and reset the scan position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c,StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        """Switch to *state*, selecting its master regexes, ignore set
        and error handler."""
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None when the input is exhausted."""
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                # The last-matched group number identifies which rule fired
                i = m.lastindex
                func,tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos          # This is here in case user has updated lexpos.
                    lexignore = self.lexignore    # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        # End of input reached
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; aliased to __next__ for Python 3 below
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dict of the symbols visible in a calling frame.

    *levels* frames above this function are skipped; the result is that
    frame's globals overlaid with its locals (when the two differ, i.e.
    when the caller is not module-level code).
    """
    try:
        raise RuntimeError
    except RuntimeError:
        _, _, tb = sys.exc_info()
        frame = tb.tb_frame           # frame where the raise occurred (here)
        for _ in range(levels):
            frame = frame.f_back
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates lexing rules harvested from a symbol table.

    *ldict* is the dict of a module/object defining tokens, literals,
    states and t_* rules.  The get_* methods pull that information out;
    the validate_* methods sanity-check it, logging problems and setting
    self.error on failure.
    """
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = { 'INITIAL' : 'inclusive'}
        self.files = {}              # Source files containing rules (for duplicate checks)
        self.error = 0               # Set to 1 when any validation problem is found

        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return

        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return

        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                # Duplicates are only a warning, not a hard error
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
                    continue
        except TypeError:
            # literals was not iterable at all
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]

        # Now build up a list of functions and a list of strings
        self.toknames = { }        # Mapping of symbols to token names
        self.funcsym = { }         # Symbols defined as functions
        self.strsym = { }          # Symbols defined as strings
        self.ignore = { }          # Ignore strings by state
        self.errorf = { }          # Error functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t,"__call__"):
                # Rule defined as a function
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                # Rule defined as a plain string
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                        if "\\" in t:
                            self.log.warning("%s contains a literal backslash '\\'",f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1

        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1

                tokname = self.toknames[fname]
                # Bound methods carry an implicit self, so they need one
                # more declared argument than plain functions
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue

                if not f.__doc__:
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
                    if c.match(""):
                        # An empty-string match would make token() loop forever
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in f.__doc__:
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1

            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue

                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue

                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if (c.match("")):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1

            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1

        for f in self.files:
            self.validate_file(f)

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the given file.
    # -----------------------------------------------------------------------------
    def validate_file(self,filename):
        import os.path
        base,ext = os.path.splitext(filename)
        if ext != '.py': return          # No idea what the file is. Return OK

        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            return                       # Couldn't find the file.  Don't worry about it

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = { }
        linen = 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    """Build and return a Lexer from rule definitions.

    Rules are read from *module*/*object* when given, otherwise from the
    caller's own namespace.  With optimize=1 previously generated tables
    (*lextab*) are reused when importable and validation is skipped; the
    tables are (re)written after a fresh build.  Also installs the built
    lexer's token()/input() as module-level globals for legacy callers.
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object: module = object

    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        # No module given: harvest rules from the caller's frame
        ldict = get_caller_module_dict(2)

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()

    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    if optimize and lextab:
        try:
            # Fast path: reuse the previously generated tables
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj

        except ImportError:
            # No usable tables; fall through and build from scratch
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states = %r", linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1

    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first (they keep definition order)
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)

        # Now add all of the simple rules (already sorted longest-first)
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")

    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error and ignore rules
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Run the lexer as a standalone program.

    Input text comes from *data*, else from the file named in sys.argv[1],
    else from standard input.  Each token is printed to stdout as a
    "(type,value,lineno,lexpos)" line.  When *lexer* is None the
    module-level input()/token() installed by lex() are used.
    """
    if not data:
        try:
            filename = sys.argv[1]
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()

    _input = lexer.input if lexer else input
    _input(data)
    _token = lexer.token if lexer else token

    while 1:
        tok = _token()
        if not tok:
            break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that attaches the regex *r* to a rule function.

    *r* may be a pattern string or another function whose docstring holds
    the pattern; either way it becomes the decorated function's __doc__,
    which is where PLY looks for a rule's regular expression.
    """
    def set_doc(f):
        f.__doc__ = r.__doc__ if hasattr(r, "__call__") else r
        return f
    return set_doc

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| apache-2.0 |
tung18tht/ICDAR-2017-Post-OCR-Correction | errors_detection/find_suspicious_eng_words.py | 1 | 1754 | import os, linecache, re, json
# Directory containing this script; all inputs and outputs are resolved
# relative to it.
work_directory_path = os.path.dirname(os.path.realpath(__file__))

# Load the reference English word list into a set for O(1) membership tests.
eng_words_file = open(work_directory_path + "/eng_words.txt", "rU")
eng_words = set()
for word in eng_words_file:
    eng_words |= {word.rstrip()}

data_directory_path = work_directory_path + "/ICDAR2017_datasetPostOCR_Evaluation_2M_v1.2"
eng_data_directory_paths = [data_directory_path + "/eng_monograph", data_directory_path + "/eng_periodical"]

# The report is a single JSON object keyed by "<parent-dir>/<file>"; it is
# written incrementally, so the braces and commas are managed by hand.
output_file = open(work_directory_path + "/Results/result_eng_words.json", "w")
output_file.write("{")

for eng_data_directory_path in eng_data_directory_paths:
    for root_path, directories, files in os.walk(eng_data_directory_path):
        for file in files:
            if os.path.splitext(file)[1] == ".txt":
                output_file.write("\n \""+os.path.basename(root_path)+"/"+file+"\": ")

                # Maps "<char-offset>:1" -> {} for each word that is not in
                # the dictionary (the dataset's expected error format).
                errors = {}

                file_path = root_path + "/" + file
                # Line 1 holds the OCR text; [14:] strips a fixed-width
                # prefix (presumably an "[OCR_toInput] "-style label --
                # TODO confirm against the dataset format).
                ocr_output = linecache.getline(file_path, 1)[14:].strip()

                # Scan space-delimited words; compare a lowercased,
                # punctuation-stripped version against the dictionary.
                word_begin_index = 0
                for i, character in enumerate(ocr_output):
                    if character == ' ':
                        word_end_index = i
                        clean_word = re.sub('\W+', '', ocr_output[word_begin_index:word_end_index].lower())
                        if clean_word not in eng_words:
                            errors[str(word_begin_index)+":1"] = {}
                        word_begin_index = word_end_index + 1

                # Handle the final word (it has no trailing space).
                clean_word = re.sub('\W+', '', ocr_output[word_begin_index:].lower())
                if clean_word not in eng_words:
                    errors[str(word_begin_index)+":1"] = {}

                output_file.write(json.dumps(errors, indent=8)+",")

# Drop the trailing comma left after the last file entry.
output_file.seek(0, 2)
output_file.truncate(output_file.tell() - 1)

# Reopen in append mode and close the top-level JSON object.
output_file = open(work_directory_path + "/Results/result_eng_words.json", "a")
output_file.write("\n}")
output_file.write("\n}") | mit |
ahmetabdi/SickRage | lib/hachoir_metadata/file_system.py | 90 | 1107 | from lib.hachoir_metadata.metadata import RootMetadata, registerExtractor
from lib.hachoir_metadata.safe import fault_tolerant
from lib.hachoir_parser.file_system import ISO9660
from datetime import datetime
class ISO9660_Metadata(RootMetadata):
    """Extract title/author/date metadata from a parsed ISO9660 disc image."""
    def extract(self, iso):
        # The primary volume descriptor carries all fields of interest.
        desc = iso['volume[0]/content']
        # NOTE(review): title and author are each assigned twice on purpose;
        # hachoir metadata properties appear to accumulate multiple values
        # rather than overwrite -- confirm against hachoir_metadata.metadata.
        self.title = desc['volume_id'].value
        self.title = desc['vol_set_id'].value
        self.author = desc['publisher'].value
        self.author = desc['data_preparer'].value
        self.producer = desc['application'].value
        self.copyright = desc['copyright'].value
        self.readTimestamp('creation_date', desc['creation_ts'].value)
        self.readTimestamp('last_modification', desc['modification_ts'].value)

    @fault_tolerant
    def readTimestamp(self, key, value):
        # ISO9660 timestamps are fixed-width digit strings (YYYYMMDDHHMMSS...);
        # an all-zero year means "not set", so skip it.
        if value.startswith("0000"):
            return
        value = datetime(
            int(value[0:4]), int(value[4:6]), int(value[6:8]),
            int(value[8:10]), int(value[10:12]), int(value[12:14]))
        setattr(self, key, value)

registerExtractor(ISO9660, ISO9660_Metadata)
| gpl-3.0 |
diagramsoftware/odoo | addons/event/res_partner.py | 329 | 1228 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class res_partner(models.Model):
    """Extend res.partner with an event-speaker flag."""
    _inherit = 'res.partner'

    # True when this contact can be proposed as a speaker for events.
    speaker = fields.Boolean(help="Check this box if this contact is a speaker.")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jacques/joyent-connector | test/feedvalidator/feedvalidator/feed.py | 2 | 3891 | """$Id: feed.py 582 2006-03-22 20:20:22Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 582 $"
__date__ = "$Date: 2006-03-23 08:20:22 +1200 (Thu, 23 Mar 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
__license__ = "Python"
from base import validatorBase
from validators import *
from logging import *
from itunes import itunes_channel
from extension import extension_feed
#
# Atom root element
#
class feed(validatorBase, extension_feed, itunes_channel):
    """Validator for the Atom <feed> root element.

    Checks the required metadata children (title, id, updated), the
    rel="self" link, and duplicate alternate links; dispatches each child
    element to a dedicated sub-validator via the do_<tag> methods.
    """

    def prevalidate(self):
        # Remember where the <feed> element started so later error reports
        # can be offset back to it.
        self.links = []
        self.line = self.dispatcher.locator.getLineNumber()
        self.col = self.dispatcher.locator.getColumnNumber()

    def missingElement(self, params):
        """Log a MissingElement error positioned at the <feed> start tag."""
        offset = [self.line - self.dispatcher.locator.getLineNumber(),
                  self.col - self.dispatcher.locator.getColumnNumber()]
        self.log(MissingElement(params), offset)

    def validate_metadata(self):
        """Check required feed-level metadata and link constraints."""
        if not 'title' in self.children:
            self.missingElement({"parent":self.name, "element":"title"})
        if not 'id' in self.children:
            self.missingElement({"parent":self.name, "element":"id"})
        if not 'updated' in self.children:
            self.missingElement({"parent":self.name, "element":"updated"})
        # ensure that there is a link rel="self"
        for link in self.links:
            if link.rel=='self': break
        else:
            offset = [self.line - self.dispatcher.locator.getLineNumber(),
                      self.col - self.dispatcher.locator.getColumnNumber()]
            self.log(MissingSelf({"parent":self.parent.name, "element":self.name}), offset)
        # can only have one alternate per type: track seen hreflangs per
        # (type, rel) pair and flag exact duplicates.
        types={}
        for link in self.links:
            if not link.rel=='alternate': continue
            if not link.type in types: types[link.type]={}
            if link.rel in types[link.type]:
                if link.hreflang in types[link.type][link.rel]:
                    self.log(DuplicateAtomLink({"parent":self.name, "element":"link", "type":link.type, "hreflang":link.hreflang}))
                else:
                    types[link.type][link.rel] += [link.hreflang]
            else:
                types[link.type][link.rel] = [link.hreflang]
        if self.itunes: itunes_channel.validate(self)

    def metadata(self):
        # Metadata elements must appear before any <entry>.
        if 'entry' in self.children:
            self.log(MisplacedMetadata({"parent":self.name, "element":self.child}))

    def validate(self):
        # If there were entries, metadata was already validated when the
        # first entry was seen (see do_entry).
        if not 'entry' in self.children:
            self.validate_metadata()

    def do_author(self):
        self.metadata()
        from author import author
        return author()

    def do_category(self):
        self.metadata()
        from category import category
        return category()

    def do_contributor(self):
        self.metadata()
        from author import author
        return author()

    def do_generator(self):
        self.metadata()
        from generator import generator
        return generator(), nonblank(), noduplicates()

    def do_id(self):
        self.metadata()
        return canonicaluri(), nows(), noduplicates()

    def do_icon(self):
        self.metadata()
        return nonblank(), nows(), rfc2396(), noduplicates()

    def do_link(self):
        self.metadata()
        from link import link
        # Keep a reference so validate_metadata() can inspect rel/type.
        self.links += [link()]
        return self.links[-1]

    def do_logo(self):
        self.metadata()
        return nonblank(), nows(), rfc2396(), noduplicates()

    def do_title(self):
        self.metadata()
        from content import textConstruct
        return textConstruct(), noduplicates()

    def do_subtitle(self):
        self.metadata()
        from content import textConstruct
        return textConstruct(), noduplicates()

    def do_rights(self):
        self.metadata()
        from content import textConstruct
        return textConstruct(), noduplicates()

    def do_updated(self):
        self.metadata()
        return rfc3339(), nows(), noduplicates()

    def do_entry(self):
        # First entry triggers metadata validation (all metadata must
        # precede entries).
        if not 'entry' in self.children:
            self.validate_metadata()
        from entry import entry
        return entry()
| gpl-2.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/guppy-0.1.10/guppy/gsl/Latex.py | 37 | 23407 | #._cv_part guppy.gsl.Latex
class Doc2Latex:
    """Visitor that renders a GSL document node tree as LaTeX.

    Construction immediately walks ``node`` (via node.accept(self), which
    dispatches to the matching visit_<tag> method) and accumulates LaTeX
    fragments in self.out; get_latex() joins them into the final string.
    """
    # Available LaTeX font sizes, smallest to largest; cur_size indexes
    # into this tuple.
    sizes = ('tiny', 'scriptsize', 'footnotesize', 'small',
             'normalsize', 'large', 'Large', 'LARGE', 'huge', 'Huge')

    def __init__(self, mod, doc, node):
        self.mod = mod
        self.doc = doc
        self.encoder = Encoder(mod)
        self.encode = self.encoder.encode
        self.node = node
        self.out = []              # accumulated LaTeX fragments
        self.ms = []               # stack of open 'dt'/'dd' contexts
        self.mode = None           # None or 'man_page'
        self.cur_style = 'rm'
        self.cur_size = list(self.sizes).index('normalsize')
        self.document_lang = None
        self.document_title = None
        self.document_metas = []
        self.latex_list_nesting = 0
        self.latex_mode = 0        # >0: pass text through without encoding
        self.noindent = 0          # suppress indent of the next paragraph
        self.authors = []
        node.accept(self)

    def _visit_children(self, node):
        # Visit each child; a ReportedError aborts only that child.
        E = self.mod.ReportedError
        for ch in node.children:
            try:
                ch.accept(self)
            except E:
                pass

    def abs_size(self, size, node):
        """Render node's argument at absolute size index ``size`` (clamped),
        restoring the current size afterwards."""
        osize = self.cur_size
        si = size
        if si < 0:
            si = 0
        elif si >= len(self.sizes):
            si = len(self.sizes) - 1
        self.append('{\\%s '%self.sizes[si])
        self.cur_size = si
        # Re-apply a non-default style inside the size group so it survives
        # the new group scope.
        if self.cur_style != 'rm':
            self.style(self.cur_style, node)
        else:
            node.arg_accept(self)
        self.append('}')
        self.cur_size = osize

    def append(self, x):
        self.out.append(x)

    def changed_size(self, delta, node):
        """Render node's argument ``delta`` size steps away from current."""
        self.abs_size(self.cur_size + delta, node)

    def error(self, msg, *args, **kwds):
        msg = 'Doc2Latex: ' + msg
        self.doc.env.error(msg, *args, **kwds)

    def get_latex(self):
        """Return the accumulated LaTeX output as one string."""
        return ''.join(self.out)

    def no_children(self, node):
        if node.children:
            self.error('No children allowed for %r.'%node.tag, node.children[0])

    def style(self, style, node):
        """Render node's argument wrapped in a LaTeX font-style group."""
        self.append('{\\%s '%style)
        ostyle = self.cur_style
        self.cur_style = style
        node.arg_accept(self)
        self.cur_style = ostyle
        # NOTE(review): both branches append the same italic-correction
        # close; the 'em' special case is redundant as written.
        if style == 'em':
            self.append('\\/}')
        else:
            self.append('\\/}')

    def visit_a(self, node):
        pass

    def visit_author(self, node):
        self.authors.append(node.arg)
        self.no_children(node)

    def visit_big(self, node):
        self.changed_size(1, node)

    def visit_block(self, node):
        self._visit_children(node)

    def visit_blockquote(self, node):
        self.append('\\begin{quote}\n')
        self.latex_list_nesting += 1
        node.arg_accept(self)
        self.latex_list_nesting -= 1
        self.append('\\end{quote}\n')

    # Named characters understood by the 'char' directive.
    char_table = {
        'nbsp' : '~',
    }

    def visit_char(self, node):
        char = node.arg.strip()
        c = self.char_table.get(char)
        if c is None:
            self.error('No such character: %r.'%char, node)
            c = char
        self.append(c)

    def visit_code(self, node):
        self.style('tt', node)

    def visit_comment(self, node):
        pass

    def visit_dd(self, node):
        # Definition-description: indent proportionally to the nesting of
        # dd contexts plus surrounding LaTeX lists.
        self.ms.append('dd')
        step = 24
        ls = (self.ms.count('dd') + self.latex_list_nesting) * step
        self.append('{\\par \\noindent \\leftskip = %d pt '%ls)
        # Adjust the margins of any lists opened inside this description.
        for i, v in enumerate(('i', 'ii', 'iii', 'iv', 'v', 'vi')[self.latex_list_nesting:]):
            self.append(' \\leftmargin%s = %d pt '%(v, ls + (i + 1) * step))
        node.arg_accept(self)
        self.append('\\par}\n')
        self.ms.pop()

    def visit_default(self, node):
        self.error('I don\'t know what to generate for the tag %r.'%node.tag, node)

    def visit_define(self, node):
        # xxx
        self._visit_children(node)

    def visit_dl(self, node):
        # A <dl> directly inside a <dt> is rendered like a description body.
        if self.ms and self.ms[-1] == 'dt':
            self.visit_dd(node)
        else:
            self.append('{\\par \\noindent\n')
            self._visit_children(node)
            self.append('\\par}\n')

    def visit_dt(self, node):
        # Definition term: hanging indent; pagebreak priority decreases
        # with nesting depth.
        self.ms.append('dt')
        self.append('{\\par \\pagebreak[%f] \\noindent \\hangindent = 12 pt \\hangafter = 1 \n'%(
            3.4-0.1*len(self.ms),
            ))
        node.arg_accept(self)
        self.append('\\par}\n')
        self.ms.pop()

    def visit_document(self, node):
        self._visit_children(node)

    def visit_document_lang(self, node):
        if self.document_lang is not None:
            self.error('Duplicate document lang directive.', node)
        self.document_lang = node

    def visit_document_title(self, node):
        if self.document_title is not None:
            self.error('Duplicate document title directive.', node)
        self.document_title = node

    def visit_exdefs(self, node):
        # Map each comma-separated symbol name to the child tag defining it.
        self.symplace = {}
        for ch in node.children:
            syms = [x.strip() for x in ch.arg.split(',')]
            for sym in syms:
                self.symplace[sym] = ch.tag

    def visit_em(self, node):
        self.style('em', node)

    def visit_enumerate(self, node):
        self.append('\\begin{enumerate}\n')
        for c in node.children:
            self.append('\\item ')
            c.accept(self)
        self.append('\\end{enumerate}\n')

    def visit_h0(self, node):
        # Not a html header,
        # we may treat this as 'new page' or chapter here
        # and some larger divisor in html.
        self.visit_hx(node)

    def visit_h1(self, node):
        self.visit_hx(node)

    def visit_h2(self, node):
        self.visit_hx(node)

    def visit_h3(self, node):
        self.visit_hx(node)

    def visit_h4(self, node):
        self.visit_hx(node)

    def visit_h5(self, node):
        self.visit_hx(node)

    def visit_h6(self, node):
        self.visit_hx(node)

    def visit_hx(self, node):
        """Render a header of level n (taken from the tag name).

        In man_page mode headers become bold, size-scaled paragraphs;
        otherwise they map to LaTeX sectioning commands via
        mod.section_table.
        """
        n = int(node.tag[1:])
        if self.mode == 'man_page':
            self.append('{\\par \\pagebreak[%d] \\vskip %d pt \\noindent\n' % (
                [4,3,3,2,2,1,1][n],
                (12 - 2 * n)))
            self.abs_size(len(self.sizes) - n - 2, self.mod.node_of_taci(
                '', '',
                [self.mod.node_of_taci('strong', node.arg, node.children)]))
            self.append('\\par \\vskip %d pt\n} \\noindent\n'%(12 - 2 * n))
            self.noindent = 1
            #self.append('\\end{list}\n')
        else:
            self.append('\\%s{'%self.mod.section_table[n])
            node.arg_accept(self)
            self.append('}\n')

    def visit_itemize(self, node):
        self.append('\\begin{itemize}\n')
        self.latex_list_nesting += 1
        for c in node.children:
            self.append('\\item ')
            c.accept(self)
        self.latex_list_nesting -= 1
        self.append('\\end{itemize}\n')

    def visit_latex(self, node):
        # Raw LaTeX: children are emitted without escaping.
        self.latex_mode += 1
        node.arg_accept(self)
        self.latex_mode -= 1

    def visit_li(self, node):
        self.append('\\item ')
        node.arg_accept(self)

    def visit_link_to(self, node):
        # xxx
        name = node.arg
        self.append(' {\\em ')
        if not node.children:
            self.append(self.encode(name))
        else:
            self._visit_children(node)
        self.append('\\/}')

    def visit_link_to_extern(self, node):
        # xxx — first child names the target document; remaining children
        # (if any) supply the link text.
        name = node.arg
        doc = node.children[0].arg
        children = node.children[1:]
        self.append(' {\\em ')
        if not children:
            self.append(self.encode(name))
        else:
            for ch in children:
                ch.accept(self)
        self.append('\\/}')

    def visit_link_to_local(self, node):
        # xxx
        name = node.arg
        self.append(' {\\em ')
        if not node.children:
            self.append(self.encode(name))
        else:
            self._visit_children(node)
        self.append('\\/}')

    def visit_link_to_unresolved(self, node):
        # xxx
        name = node.arg
        self.append(' {\\em ')
        if not node.children:
            self.append(self.encode(name))
        else:
            self._visit_children(node)
        self.append('\\/}')

    def visit_literal_block(self, node):
        self.append('{\\ttfamily \\raggedright \\noindent')
        self.encoder.literal_block = 1
        self.encoder.insert_none_breaking_blanks = 1
        node.arg_accept(self)
        self.encoder.literal_block = 0
        self.encoder.insert_none_breaking_blanks = 0
        self.append('}\n')

    def visit_lp(self, node):
        # Paragraph whose content is raw LaTeX.
        self.latex_mode += 1
        self.visit_paragraph(node)
        self.latex_mode -= 1

    def visit_man_page_mode(self, node):
        omode = self.mode
        self.mode = 'man_page'
        self._visit_children(node)
        self.mode = omode

    def visit_meta(self, node):
        self.document_metas.append(node)

    def visit_ol(self, node):
        self.append('\\begin{enumerate}\n')
        self._visit_children(node)
        self.append('\\end{enumerate}\n')

    def visit_p(self, node):
        self.visit_paragraph(node)

    def visit_paragraph(self, node):
        self.append('{\\par ')
        # Honor a pending no-indent request (set after man_page headers).
        if self.noindent:
            self.append('\\parindent = 0 pt ')
            self.noindent = 0
        self.append('\n')
        node.arg_accept(self)
        self.append(' \\par}\n')

    def visit_pre(self, node):
        # I couldn't use Latex verbatim environment
        # since it didn't respected leftskip
        # so the environment became misplaced (within dd)
        text = node.arg.strip()
        if text:
            text += '\n'
        text = text + node.get_text()
        text = text.expandtabs()
        lines = text.split('\n')
        # Drop a trailing empty line produced by a final newline.
        if lines and not lines[-1]:
            lines.pop()
        if not lines:
            return
        self.append('\\par\n')
        self.encoder.insert_none_breaking_blanks += 1
        self.encoder.literal+=1
        first = 1  # NOTE(review): unused leftover variable.
        self.append('{\\tt{%s}}\n'%self.encode(lines[0]))
        for line in lines[1:]:
            self.append(
                '{ \\par \\parindent = 0 pt \\parskip = 0 pt \\tt{%s} }\n'%
                self.encode(line))
        self.encoder.insert_none_breaking_blanks -= 1
        self.encoder.literal -= 1
        self.append('\\par\n')

    def visit_small(self, node):
        self.changed_size(-1, node)

    def visit_spc_colonkind(self, node):
        self.append('~{\\bf :} ')

    def visit_spc_mapsto(self, node):
        self.append(' \\(\mapsto \\) ')

    def visit_string(self, node):
        self._visit_children(node)

    def visit_strong(self, node):
        self.style('bf', node)

    def visit_sub(self, node):
        # Subscript: lowered, one size smaller.
        self.append('\\raisebox{-.6ex}{')
        self.changed_size(-1, node)
        self.append('}')

    def visit_sup(self, node):
        # Superscript: raised, one size smaller.
        self.append('\\raisebox{.6ex}{')
        self.changed_size(-1, node)
        self.append('}')

    def visit_symbol(self, node):
        self.visit_text(node)

    def visit_table(self, node):
        Table(self, node)

    def visit_text(self, node):
        if self.latex_mode:
            # Raw mode: emit text untouched.
            self.append(node.arg)
        elif 1:
            text = node.arg
            text = self.encoder.encode(text)
            self.append(text)
        else:
            # NOTE(review): dead branch ('elif 1' above always wins) —
            # an older, manual escaping implementation kept for reference.
            for ch in node.arg:
                if ch == '\\':
                    ch = '{\\textbackslash}'
                elif ch in '{}#~':
                    ch = '\\'+ch
                self.append(ch)
            self.append('\n')
        self._visit_children(node)

    def visit_to_document_only(self, node):
        self._visit_children(node)

    def visit_to_html_only(self, node):
        pass

    def visit_to_tester_only(self, node):
        pass

    def visit_tt(self, node):
        self.append('\\texttt{')
        self.encoder.literal = 1
        node.arg_accept(self)
        self.encoder.literal = 0
        self.append('}')

    def visit_ul(self, node):
        self.append('\\begin{itemize}\n')
        self._visit_children(node)
        self.append('\\end{itemize}\n')

    def visit_var(self, node):
        self.style('em', node)
class Table(Doc2Latex):
    """Render a table node as a LaTeX longtable.

    Inherits the visitor machinery from Doc2Latex but copies the parent
    visitor's state via __dict__.update and writes into its own buffers
    before emitting the final longtable into the parent's output.
    """
    many_hlines = 1 # Use extra many hlines.. looks good, a matter of taste.

    def __init__(self, d2l, node):
        self.d2l = d2l
        # Borrow the parent visitor's entire state (encoder, mode, ...).
        self.__dict__.update(d2l.__dict__)
        self.node = node
        self.out = []
        self.rows = []
        self.colwidth = None       # fractions of \linewidth per column
        self._visit_children(node)
        maxcols = 0
        for row in self.rows:
            if len(row.columns) > maxcols:
                maxcols = len(row.columns)
        if not maxcols:
            return # Empty table
        if self.colwidth is not None:
            if not len(self.colwidth) == maxcols:
                self.error("Wrong number of column width specifications (%d) vs\n"
                           " max columns in table (%d)."%(len(self.colwidth), maxcols),
                           node)
        else:
            # No explicit widths: distribute evenly.
            self.colwidth = [1.0/maxcols]*maxcols
        ap = self.d2l.append
        ap('\n\\begin{longtable}[c]{|%s|}\n'%('|'.join(['p{%.2g\\linewidth}'%cw
                                                        for cw in self.colwidth])))
        if self.many_hlines:
            ap('\\hline\n')
        for row in self.rows:
            for col in row.columns:
                ap(''.join(col.data))
                if col is row.columns[-1]:
                    # End of row: terminate it and optionally rule it.
                    if self.many_hlines:
                        ap('\\\\\n')
                        ap('\\hline\n')
                    else:
                        if row is not self.rows[-1]:
                            ap('\\\\\n')
                else:
                    ap('&\n')
            if row.is_head:
                # Header rows repeat on every longtable page.
                ap('\\hline\n')
                ap('\\endhead\n')
        ap('\n\\end{longtable}\n')

    def visit_colgroup(self, node):
        """Parse col_width children into self.colwidth fractions."""
        colwidth = []
        for c in node.children:
            if c.tag != "col_width":
                self.error('Unrecognized colgroup option: %r'%c.tag, c)
            cg = c.arg
            # Accept either a percentage ("25%") or a bare fraction.
            if cg.endswith('%'):
                cg = cg[:-1]
                cg = float(cg)/100.0
            else:
                cg = float(cg)
            colwidth.append(cg)
        self.colwidth = colwidth

    def visit_options(self, node):
        pass

    def visit_thead(self, node):
        self._visit_children(node)
        # Mark the row just collected as a (repeating) header row.
        self.rows[-1].is_head = 1

    def visit_tr(self, node):
        self.rows.append(Row(self, node))
class Row(Doc2Latex):
    """One table row: collects its cells as Column objects."""
    # Set to 1 by Table.visit_thead for header rows.
    is_head = 0

    def __init__(self, table, node):
        # Borrow the enclosing Table's state wholesale.
        self.__dict__.update(table.__dict__)
        self.columns = []
        self._visit_children(node)

    def visit_td(self, node):
        self.columns.append(Column(self, node))

    def visit_th(self, node):
        # Header cells are rendered like ordinary cells here.
        self.columns.append(Column(self, node))
class Column(Doc2Latex):
    """One table cell: renders its content into self.data."""

    def __init__(self, row, node):
        # Borrow the enclosing Row's state wholesale.
        self.__dict__.update(row.__dict__)
        self.data = []
        # Redirect the visitor's append so cell output lands in self.data
        # instead of the document-level buffer.
        self.append = self.data.append
        node.arg_accept(self)
class Babel:
    """Language specifics for LaTeX (babel package).

    Provides alternating opening/closing quotation marks for the document
    language and the mapping from ISO 639 language codes to babel language
    names.
    """
    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    #
    # NOTE(review): the original table listed 'hu' and 'pt' twice with
    # conflicting values ('hu': 'magyar' / 'hungarian', 'pt': 'portuguese'
    # twice); in a dict literal the last binding silently wins, so the
    # effective mappings are kept here and the shadowed duplicates removed.
    _ISO639_TO_BABEL = {
        'no': 'norsk',      # XXX added by hand (forget about nynorsk?)
        'gd': 'scottish',   # XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',    # XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
        }

    def __init__(self, mod):
        self.language = mod.language_code
        self.re = mod.re
        # pdflatex does not produce double quotes for ngerman in tt.
        self.double_quote_replacment = None
        if self.re.search('^de', self.language):
            # German: use explicit \glqq / \grqq glyphs, and substitute
            # '"' in monospace text where the babel shorthand misbehaves.
            #self.quotes = ("\"`", "\"'")
            self.quotes = ('{\\glqq}', '{\\grqq}')
            self.double_quote_replacment = "{\\dq}"
        else:
            self.quotes = ("``", "''")
        self.quote_index = 0

    def next_quote(self):
        """Return the next quote mark, alternating opening/closing."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index + 1) % 2
        return q

    def quote_quotes(self, text):
        """Replace each '"' in text with alternating language quotes."""
        t = None
        for part in text.split('"'):
            if t is None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def double_quotes_in_tt(self, text):
        """Replace '"' in monospace text when the language requires it."""
        if not self.double_quote_replacment:
            return text
        return text.replace('"', self.double_quote_replacment)

    def get_language(self):
        """Return the babel language name for self.language, or None.

        Falls back to the base language for dialect codes like 'en_US'.
        """
        # 'in' replaces the deprecated dict.has_key() (removed in Python 3);
        # behaviour is identical.
        if self.language in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[self.language]
        # support dialects.
        l = self.language.split("_")[0]
        if l in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[l]
        return None
class Encoder:
    """Encode plain text into LaTeX-safe markup.

    Escapes LaTeX special characters, converts straight quotes via Babel,
    and maps a small set of unicode characters to LaTeX commands.  The
    class attributes below are mode flags that the document walker toggles
    (e.g. Doc2Latex sets ``literal`` around \\texttt content).
    """
    literal_block = 0                 # inside a literal (monospace) block
    literal = 0                       # inside inline literal text
    mathmode = 0                      # inside math mode
    verbatim = 0                      # pass text through untouched
    insert_newline = 0                # emit explicit LaTeX line breaks
    mbox_newline = 0                  # wrap lines in \mbox (see encode())
    insert_none_breaking_blanks = 0   # turn spaces into '~'

    # Unicode characters with direct LaTeX replacements.  Only some special
    # characters are translated; for documents with many utf-8 chars one
    # should use the LaTeX unicode package.
    latex_equivalents = {
        u'\u00A0' : '~',
        u'\u2013' : '{--}',
        u'\u2014' : '{---}',
        u'\u2018' : '`',
        u'\u2019' : '\'',
        u'\u201A' : ',',
        u'\u201C' : '``',
        u'\u201D' : '\'\'',
        u'\u201E' : ',,',
        u'\u2020' : '{\\dag}',
        u'\u2021' : '{\\ddag}',
        u'\u2026' : '{\\dots}',
        u'\u2122' : '{\\texttrademark}',
        u'\u21d4' : '{$\\Leftrightarrow$}',
        }

    def __init__(self, mod):
        self.mod = mod
        self.re = mod.re
        self.babel = Babel(mod)
        self.font_encoding = mod.font_encoding
        self.latex_encoding = self.to_latex_encoding(mod.output_encoding)

    def to_latex_encoding(self, docutils_encoding):
        """
        Translate docutils encoding name into latex's.

        Default fallback method is remove "-" and "_" chars from
        docutils_encoding.
        """
        tr = { "iso-8859-1": "latin1", # west european
               "iso-8859-2": "latin2", # east european
               "iso-8859-3": "latin3", # esperanto, maltese
               "iso-8859-4": "latin4", # north european,scandinavian, baltic
               "iso-8859-5": "iso88595", # cyrillic (ISO)
               "iso-8859-9": "latin5", # turkish
               "iso-8859-15": "latin9", # latin9, update to latin1.
               "mac_cyrillic": "maccyr", # cyrillic (on Mac)
               "windows-1251": "cp1251", # cyrillic (on Windows)
               "koi8-r": "koi8-r", # cyrillic (Russian)
               "koi8-u": "koi8-u", # cyrillic (Ukrainian)
               "windows-1250": "cp1250", #
               "windows-1252": "cp1252", #
               "us-ascii": "ascii", # ASCII (US)
               # unmatched encodings
               #"": "applemac",
               #"": "ansinew", # windows 3.1 ansi
               #"": "ascii", # ASCII encoding for the range 32--127.
               #"": "cp437", # dos latine us
               #"": "cp850", # dos latin 1
               #"": "cp852", # dos latin 2
               #"": "decmulti",
               #"": "latin10",
               #"iso-8859-6": "" # arabic
               #"iso-8859-7": "" # greek
               #"iso-8859-8": "" # hebrew
               #"iso-8859-10": "" # latin6, more complete iso-8859-4
               }
        # 'in' replaces the deprecated dict.has_key(); same behaviour, and
        # the lowercased name is computed once instead of twice.
        encoding = docutils_encoding.lower()
        if encoding in tr:
            return tr[encoding]
        # NOTE(review): string.maketrans with two str arguments is the
        # Python 2 API; this fallback would need str.maketrans on Python 3.
        return docutils_encoding.translate(
            self.mod.string.maketrans("", ""), "_-").lower()

    def unicode_to_latex(self, text):
        # see LaTeX codec
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
        for uchar, replacement in self.latex_equivalents.items():
            text = text.replace(uchar, replacement)
        return text

    def encode(self, text):
        r"""
        Encode special characters in `text` & return.
            # $ % & ~ _ ^ \ { }
        Escaping with a backslash does not help with backslashes, ~ and ^.
            < > are only available in math-mode or tt font. (really ?)
            $ starts math- mode.
        AND quotes:
        """
        if self.verbatim:
            return text
        # compile the regexps once. do it here so one can see them.
        #
        # first the braces.
        if 'encode_re_braces' not in self.__dict__:
            self.encode_re_braces = self.re.compile(r'([{}])')
        text = self.encode_re_braces.sub(r'{\\\1}', text)
        if 'encode_re_bslash' not in self.__dict__:
            # find backslash: except in the form '{\{}' or '{\}}'.
            self.encode_re_bslash = self.re.compile(r'(?<!{)(\\)(?![{}]})')
        # then the backslash: except in the form from line above:
        # either '{\{}' or '{\}}'.
        text = self.encode_re_bslash.sub(r'{\\textbackslash}', text)
        # then dollar
        text = text.replace("$", '{\\$}')
        if not (self.literal_block or self.literal or self.mathmode):
            # the vertical bar: in mathmode |,\vert or \mid
            # in textmode \textbar
            text = text.replace("|", '{\\textbar}')
            text = text.replace("<", '{\\textless}')
            text = text.replace(">", '{\\textgreater}')
        # then
        text = text.replace("&", '{\\&}')
        # the ^:
        # * verb|^| does not work in mbox.
        # * mathmode has wedge. hat{~} would also work.
        # text = text.replace("^", '{\\ensuremath{^\\wedge}}')
        text = text.replace("^", '{\\textasciicircum}')
        text = text.replace("%", '{\\%}')
        text = text.replace("#", '{\\#}')
        text = text.replace("~", '{\\textasciitilde}')
        # Separate compound characters, e.g. "--" to "-{}-". (The
        # actual separation is done later; see below.)
        separate_chars = '-'
        if self.literal_block or self.literal:
            # In monospace-font, we also separate ",,", "``" and "''"
            # and some other characters which can't occur in
            # non-literal text.
            separate_chars += ',`\'"<>'
            # pdflatex does not produce doublequotes for ngerman.
            text = self.babel.double_quotes_in_tt(text)
            if self.font_encoding == 'OT1':
                # We're using OT1 font-encoding and have to replace
                # underscore by underlined blank, because this has
                # correct width.
                text = text.replace('_', '{\\underline{ }}')
                # And the tt-backslash doesn't work in OT1, so we use
                # a mirrored slash.
                text = text.replace('\\textbackslash', '\\reflectbox{/}')
            else:
                text = text.replace('_', '{\\_}')
        else:
            text = self.babel.quote_quotes(text)
            text = text.replace("_", '{\\_}')
        for char in separate_chars * 2:
            # Do it twice ("* 2") because otherwise we would replace
            # "---" by "-{}--".
            text = text.replace(char + char, char + '{}' + char)
        if self.insert_newline or self.literal_block:
            # Insert a blank before the newline, to avoid
            # ! LaTeX Error: There's no line here to end.
            text = text.replace("\n", '~\\\\\n')
        elif self.mbox_newline:
            # NOTE(review): in this elif, literal_block is necessarily
            # false, so the first branch is dead; it also references
            # self.literal_block_stack, which is never defined in this
            # class — latent AttributeError if ever reached.
            if self.literal_block:
                closings = "}" * len(self.literal_block_stack)
                openings = "".join(self.literal_block_stack)
            else:
                closings = ""
                openings = ""
            text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings, openings))
        # lines starting with "[" give errors.
        text = text.replace('[', '{[}')
        if self.insert_none_breaking_blanks:
            text = text.replace(' ', '~')
        if self.latex_encoding != 'utf8':
            text = self.unicode_to_latex(text)
        return text
class _GLUECLAMP_:
    """Guppy glue module: declares imports/defaults and the public entry
    points for rendering a GSL document node tree as LaTeX.

    The guppy framework resolves _imports_ lazily into attributes of this
    module object; the plain attributes below are configuration defaults.
    """
    _imports_ = (
        '_parent:SpecNodes',
        '_parent.SpecNodes:node_of_taci',
        '_parent.Main:ReportedError',
        '_root:re',
        '_root:string',
        )
    # Configuration defaults, overridable by the environment.
    font_encoding = ''
    double_quote_replacment = ''
    language_code = ''
    output_encoding = ''
    # Maps header level (h0..h6) to the LaTeX sectioning command used by
    # Doc2Latex.visit_hx outside man_page mode.
    section_table = {
        0:'part',
        1:'chapter',
        2:'section',
        3:'subsection',
        4:'subsubsection',
        5:'paragraph',
        6:'subparagraph'
        }

    def doc2text(self, doc, node):
        """Render the document node tree and return the LaTeX source."""
        d2l = Doc2Latex(self, doc, node)
        return d2l.get_latex()

    def doc2filer(self, doc, node, name, dir, opts, IO):
        """Render the document and wrap it in a write_file node targeting
        <dir>/<name>.tex."""
        text = self.doc2text(doc, node)
        path = IO.path.join(dir, '%s.tex'%name)
        node = self.node_of_taci('write_file', path, [self.node_of_taci('text', text)])
        return node
| apache-2.0 |
evanma92/routeh | flask/lib/python2.7/site-packages/pbr/tests/test_hooks.py | 64 | 4039 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import os
import textwrap
from testtools.content import text_content
from testtools.matchers import Contains, EndsWith
from pbr.tests import base
from pbr.tests import util
class TestHooks(base.BaseTestCase):
    """Integration tests for pbr setup hooks.

    Configures global setup-hooks plus pre/post build_ext command hooks in
    the test package's setup.cfg, then runs setup commands and checks the
    hooks' output appears (or doesn't) in the expected order.
    """

    def setUp(self):
        super(TestHooks, self).setUp()
        # Register the test hooks in the sandboxed package's setup.cfg.
        with util.open_config(
                os.path.join(self.package_dir, 'setup.cfg')) as cfg:
            cfg.set('global', 'setup-hooks',
                    'pbr_testpackage._setup_hooks.test_hook_1\n'
                    'pbr_testpackage._setup_hooks.test_hook_2')
            cfg.set('build_ext', 'pre-hook.test_pre_hook',
                    'pbr_testpackage._setup_hooks.test_pre_hook')
            cfg.set('build_ext', 'post-hook.test_post_hook',
                    'pbr_testpackage._setup_hooks.test_post_hook')

    def test_global_setup_hooks(self):
        """Test setup_hooks.

        Test that setup_hooks listed in the [global] section of setup.cfg are
        executed in order.
        """
        stdout, _, return_code = self.run_setup('egg_info')
        assert 'test_hook_1\ntest_hook_2' in stdout
        assert return_code == 0

    def test_command_hooks(self):
        """Test command hooks.

        Simple test that the appropriate command hooks run at the
        beginning/end of the appropriate command.
        """
        # egg_info must NOT trigger the build_ext hooks.
        stdout, _, return_code = self.run_setup('egg_info')
        assert 'build_ext pre-hook' not in stdout
        assert 'build_ext post-hook' not in stdout
        assert return_code == 0
        # build_ext runs pre-hook first and post-hook last.
        stdout, stderr, return_code = self.run_setup('build_ext')
        self.addDetailUniqueName('stderr', text_content(stderr))
        assert textwrap.dedent("""
            running build_ext
            running pre_hook pbr_testpackage._setup_hooks.test_pre_hook for command build_ext
            build_ext pre-hook
            """) in stdout # flake8: noqa
        self.expectThat(stdout, EndsWith('build_ext post-hook'))
        assert return_code == 0

    def test_custom_commands_known(self):
        # Custom commands registered via hooks must show up in --help-commands.
        stdout, _, return_code = self.run_setup('--help-commands')
        self.assertFalse(return_code)
        self.assertThat(stdout, Contains(" testr "))
| bsd-3-clause |
tdtrask/ansible | lib/ansible/modules/network/cloudengine/ce_bgp_neighbor_af.py | 22 | 109157 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_bgp_neighbor_af
version_added: "2.4"
short_description: Manages BGP neighbor Address-family configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP neighbor Address-family configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
The BGP instance can be used only after the corresponding VPN instance is created.
required: true
af_type:
description:
- Address family type of a BGP instance.
required: true
choices: ['ipv4uni', 'ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn']
remote_address:
description:
- IPv4 or IPv6 peer connection address.
required: true
advertise_irb:
description:
- If the value is true, advertised IRB routes are distinguished.
If the value is false, advertised IRB routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_arp:
description:
- If the value is true, advertised ARP routes are distinguished.
If the value is false, advertised ARP routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_remote_nexthop:
description:
- If the value is true, the remote next-hop attribute is advertised to peers.
If the value is false, the remote next-hop attribute is not advertised to any peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_community:
description:
- If the value is true, the community attribute is advertised to peers.
If the value is false, the community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_ext_community:
description:
- If the value is true, the extended community attribute is advertised to peers.
If the value is false, the extended community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
discard_ext_community:
description:
- If the value is true, the extended community attribute in the peer route information is discarded.
If the value is false, the extended community attribute in the peer route information is not discarded.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_enable:
description:
- If the value is true, repetitive local AS numbers are allowed.
If the value is false, repetitive local AS numbers are not allowed.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_limit:
description:
- Set the maximum number of repetitive local AS number.
The value is an integer ranging from 1 to 10.
required: false
default: null
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups)
after BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers
and pass the configured import policy.
required: false
default: no_use
choices: ['no_use','true','false']
nexthop_configure:
description:
- null, The next hop is not changed.
local, The next hop is changed to the local IP address.
invariable, Prevent the device from changing the next hop of each imported IGP route
when advertising it to its BGP peers.
required: false
default: null
choices: ['null', 'local', 'invariable']
preferred_value:
description:
- Assign a preferred value for the routes learned from a specified peer.
The value is an integer ranging from 0 to 65535.
required: false
default: null
public_as_only:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_force:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_limited:
description:
- Limit the use of public AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_replace:
description:
- Replace private AS numbers with public AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_skip_peer_as:
description:
- Public as only skip peer as.
required: false
default: no_use
choices: ['no_use','true','false']
route_limit:
description:
- Configure the maximum number of routes that can be accepted from a peer.
The value is an integer ranging from 1 to 4294967295.
required: false
default: null
route_limit_percent:
description:
- Specify the percentage of routes when a router starts to generate an alarm.
The value is an integer ranging from 1 to 100.
required: false
default: null
route_limit_type:
description:
- Noparameter, After the number of received routes exceeds the threshold and the timeout
timer expires,no action.
AlertOnly, An alarm is generated and no additional routes will be accepted if the maximum
number of routes allowed have been received.
IdleForever, The connection that is interrupted is not automatically re-established if the
maximum number of routes allowed have been received.
IdleTimeout, After the number of received routes exceeds the threshold and the timeout timer
expires, the connection that is interrupted is automatically re-established.
required: false
default: null
choices: ['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']
route_limit_idle_timeout:
description:
- Specify the value of the idle-timeout timer to automatically reestablish the connections after
they are cut off when the number of routes exceeds the set threshold.
The value is an integer ranging from 1 to 1200.
required: false
default: null
rt_updt_interval:
description:
- Specify the minimum interval at which Update packets are sent. The value is an integer, in seconds.
The value is an integer ranging from 0 to 600.
required: false
default: null
redirect_ip:
description:
- Redirect ip.
required: false
default: no_use
choices: ['no_use','true','false']
redirect_ip_vaildation:
description:
- Redirect IP validation.
required: false
default: no_use
choices: ['no_use','true','false']
reflect_client:
description:
- If the value is true, the local device functions as the route reflector and a peer functions
as a client of the route reflector.
If the value is false, the route reflector and client functions are not configured.
required: false
default: no_use
choices: ['no_use','true','false']
substitute_as_enable:
description:
- If the value is true, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is enabled.
If the value is false, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is disabled.
required: false
default: no_use
choices: ['no_use','true','false']
import_rt_policy_name:
description:
- Specify the filtering policy applied to the routes learned from a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
export_rt_policy_name:
description:
- Specify the filtering policy applied to the routes to be advertised to a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
import_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes received from a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
export_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes to be advertised to a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
import_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes received from a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
export_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes to be advertised to a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
import_as_path_name_or_num:
description:
- A routing strategy based on the AS path list for routing received by a designated peer.
required: false
default: null
export_as_path_name_or_num:
description:
- Application of a AS path list based filtering policy to the routing of a specified peer.
required: false
default: null
import_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes received from a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
export_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes to be advertised to a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
ipprefix_orf_enable:
description:
- If the value is true, the address prefix-based Outbound Route Filter (ORF) capability is
enabled for peers.
If the value is false, the address prefix-based Outbound Route Filter (ORF) capability is
disabled for peers.
required: false
default: no_use
choices: ['no_use','true','false']
is_nonstd_ipprefix_mod:
description:
- If the value is true, Non-standard capability codes are used during capability negotiation.
If the value is false, RFC-defined standard ORF capability codes are used during capability negotiation.
required: false
default: no_use
choices: ['no_use','true','false']
orftype:
description:
- ORF Type.
The value is an integer ranging from 0 to 65535.
required: false
default: null
orf_mode:
description:
- ORF mode.
null, Default value.
receive, ORF for incoming packets.
send, ORF for outgoing packets.
both, ORF for incoming and outgoing packets.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
soostring:
description:
- Configure the Site-of-Origin (SoO) extended community attribute.
The value is a string of 3 to 21 characters.
required: false
default: null
default_rt_adv_enable:
description:
- If the value is true, the function to advertise default routes to peers is enabled.
If the value is false, the function to advertise default routes to peers is disabled.
required: false
default: no_use
choices: ['no_use','true', 'false']
default_rt_adv_policy:
description:
- Specify the name of a used policy. The value is a string.
The value is a string of 1 to 40 characters.
required: false
default: null
default_rt_match_mode:
description:
- null, Null.
matchall, Advertise the default route if all matching conditions are met.
matchany, Advertise the default route if any matching condition is met.
required: false
default: null
choices: ['null', 'matchall', 'matchany']
add_path_mode:
description:
- null, Null.
receive, Support receiving Add-Path routes.
send, Support sending Add-Path routes.
both, Support receiving and sending Add-Path routes.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
adv_add_path_num:
description:
- The number of addPath advertise route.
The value is an integer ranging from 2 to 64.
required: false
default: null
origin_as_valid:
description:
- If the value is true, Application results of route announcement.
If the value is false, Routing application results are not notified.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_enable:
description:
- If the value is true, vpls enable.
If the value is false, vpls disable.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_ad_disable:
description:
- If the value is true, enable vpls-ad.
If the value is false, disable vpls-ad.
required: false
default: no_use
choices: ['no_use','true', 'false']
update_pkt_standard_compatible:
description:
- If the value is true, When the vpnv4 multicast neighbor receives and updates the message,
the message has no label.
If the value is false, When the vpnv4 multicast neighbor receives and updates the message,
the message has label.
required: false
default: no_use
choices: ['no_use','true', 'false']
'''
EXAMPLES = '''
- name: CloudEngine BGP neighbor address family test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config BGP peer Address_Family"
ce_bgp_neighbor_af:
state: present
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
- name: "Undo BGP peer Address_Family"
ce_bgp_neighbor_af:
state: absent
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"af_type": "ipv4uni", "nexthop_configure": "local",
"remote_address": "192.168.10.10",
"state": "present", "vrf_name": "js"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "null",
"vrf_name": "js"}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "local",
"vrf_name": "js"}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["peer 192.168.10.10 next-hop-local"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
# get bgp peer af
CE_GET_BGP_PEER_AF_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF>
<remoteAddress></remoteAddress>
"""
CE_GET_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp peer af
CE_MERGE_BGP_PEER_AF_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="merge">
<remoteAddress>%s</remoteAddress>
"""
CE_MERGE_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp peer af
CE_CREATE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="create">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp peer af
CE_DELETE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="delete">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
class BgpNeighborAf(object):
""" Manages BGP neighbor Address-family configuration """
def netconf_get_config(self, **kwargs):
    """Fetch configuration from the device over NETCONF.

    Keyword Args:
        module: the AnsibleModule instance (carries the connection).
        conf_str: the NETCONF subtree filter XML to send.

    Returns:
        The raw XML reply string from the device.
    """
    return get_nc_config(kwargs["module"], kwargs["conf_str"])
def netconf_set_config(self, **kwargs):
    """Push configuration to the device over NETCONF.

    Keyword Args:
        module: the AnsibleModule instance (carries the connection).
        conf_str: the <config> XML payload to apply.

    Returns:
        The raw XML reply string from the device.
    """
    return set_nc_config(kwargs["module"], kwargs["conf_str"])
def check_bgp_neighbor_af_args(self, **kwargs):
    """Validate peer address-family arguments and detect config drift.

    Keyword Args:
        module: the AnsibleModule instance; reads params vrf_name,
            state, af_type and remote_address, and is used for
            fail_json on invalid input.

    Returns:
        dict with the device's current peerAF state (when present)
        and a "need_cfg" bool telling the caller whether the peer
        entry must be created (state=present) or deleted
        (state=absent).
    """
    module = kwargs["module"]
    result = dict()
    need_cfg = False

    vrf_name = module.params['vrf_name']
    if vrf_name:
        if len(vrf_name) > 31 or len(vrf_name) == 0:
            module.fail_json(
                msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)

    state = module.params['state']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    if not check_ip_addr(ipaddr=remote_address):
        module.fail_json(
            msg='Error: The remote_address %s is invalid.' % remote_address)

    # Query every peer configured under this vrf/address-family.
    conf_str = CE_GET_BGP_PEER_AF_HEADER % (
        vrf_name, af_type) + CE_GET_BGP_PEER_AF_TAIL
    recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)

    if state == "present":
        if "<data/>" in recv_xml:
            # Nothing configured at all for this vrf/af.
            need_cfg = True
        else:
            re_find = re.findall(
                r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
            if re_find:
                result["remote_address"] = re_find
                result["vrf_name"] = vrf_name
                result["af_type"] = af_type
                # The reply may list several peers; the target peer is
                # already configured if its address appears anywhere in
                # the list, not only as the first entry.
                if remote_address not in re_find:
                    need_cfg = True
            else:
                need_cfg = True
    else:
        if "<data/>" in recv_xml:
            # Nothing configured, so nothing to delete.
            pass
        else:
            re_find = re.findall(
                r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
            if re_find:
                result["remote_address"] = re_find
                result["vrf_name"] = vrf_name
                result["af_type"] = af_type
                # Delete only if the target peer actually exists in the
                # returned list (was: compared re_find[0] only, which
                # missed peers that are not the first entry).
                if remote_address in re_find:
                    need_cfg = True

    result["need_cfg"] = need_cfg
    return result
def check_bgp_neighbor_af_other(self, **kwargs):
""" check_bgp_neighbor_af_other """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
if state == "absent":
result["need_cfg"] = need_cfg
return result
advertise_irb = module.params['advertise_irb']
if advertise_irb != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseIrb></advertiseIrb>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseIrb>(.*)</advertiseIrb>.*', recv_xml)
if re_find:
result["advertise_irb"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_irb:
need_cfg = True
else:
need_cfg = True
advertise_arp = module.params['advertise_arp']
if advertise_arp != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseArp></advertiseArp>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseArp>(.*)</advertiseArp>.*', recv_xml)
if re_find:
result["advertise_arp"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_arp:
need_cfg = True
else:
need_cfg = True
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if advertise_remote_nexthop != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseRemoteNexthop></advertiseRemoteNexthop>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseRemoteNexthop>(.*)</advertiseRemoteNexthop>.*', recv_xml)
if re_find:
result["advertise_remote_nexthop"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_remote_nexthop:
need_cfg = True
else:
need_cfg = True
advertise_community = module.params['advertise_community']
if advertise_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseCommunity></advertiseCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseCommunity>(.*)</advertiseCommunity>.*', recv_xml)
if re_find:
result["advertise_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_community:
need_cfg = True
else:
need_cfg = True
advertise_ext_community = module.params['advertise_ext_community']
if advertise_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseExtCommunity></advertiseExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseExtCommunity>(.*)</advertiseExtCommunity>.*', recv_xml)
if re_find:
result["advertise_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_ext_community:
need_cfg = True
else:
need_cfg = True
discard_ext_community = module.params['discard_ext_community']
if discard_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<discardExtCommunity></discardExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<discardExtCommunity>(.*)</discardExtCommunity>.*', recv_xml)
if re_find:
result["discard_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != discard_ext_community:
need_cfg = True
else:
need_cfg = True
allow_as_loop_enable = module.params['allow_as_loop_enable']
if allow_as_loop_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopEnable></allowAsLoopEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopEnable>(.*)</allowAsLoopEnable>.*', recv_xml)
if re_find:
result["allow_as_loop_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_enable:
need_cfg = True
else:
need_cfg = True
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
if int(allow_as_loop_limit) > 10 or int(allow_as_loop_limit) < 1:
module.fail_json(
msg='the value of allow_as_loop_limit %s is out of [1 - 10].' % allow_as_loop_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopLimit></allowAsLoopLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopLimit>(.*)</allowAsLoopLimit>.*', recv_xml)
if re_find:
result["allow_as_loop_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_limit:
need_cfg = True
else:
need_cfg = True
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
if re_find:
result["keep_all_routes"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != keep_all_routes:
need_cfg = True
else:
need_cfg = True
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<nextHopConfigure></nextHopConfigure>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<nextHopConfigure>(.*)</nextHopConfigure>.*', recv_xml)
if re_find:
result["nexthop_configure"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != nexthop_configure:
need_cfg = True
else:
need_cfg = True
preferred_value = module.params['preferred_value']
if preferred_value:
if int(preferred_value) > 65535 or int(preferred_value) < 0:
module.fail_json(
msg='the value of preferred_value %s is out of [0 - 65535].' % preferred_value)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<preferredValue></preferredValue>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<preferredValue>(.*)</preferredValue>.*', recv_xml)
if re_find:
result["preferred_value"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != preferred_value:
need_cfg = True
else:
need_cfg = True
public_as_only = module.params['public_as_only']
if public_as_only != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnly></publicAsOnly>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnly>(.*)</publicAsOnly>.*', recv_xml)
if re_find:
result["public_as_only"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only:
need_cfg = True
else:
need_cfg = True
public_as_only_force = module.params['public_as_only_force']
if public_as_only_force != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyForce></publicAsOnlyForce>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyForce>(.*)</publicAsOnlyForce>.*', recv_xml)
if re_find:
result["public_as_only_force"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_force:
need_cfg = True
else:
need_cfg = True
public_as_only_limited = module.params['public_as_only_limited']
if public_as_only_limited != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyLimited></publicAsOnlyLimited>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyLimited>(.*)</publicAsOnlyLimited>.*', recv_xml)
if re_find:
result["public_as_only_limited"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_limited:
need_cfg = True
else:
need_cfg = True
public_as_only_replace = module.params['public_as_only_replace']
if public_as_only_replace != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyReplace></publicAsOnlyReplace>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyReplace>(.*)</publicAsOnlyReplace>.*', recv_xml)
if re_find:
result["public_as_only_replace"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_replace:
need_cfg = True
else:
need_cfg = True
public_as_only_skip_peer_as = module.params[
'public_as_only_skip_peer_as']
if public_as_only_skip_peer_as != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlySkipPeerAs></publicAsOnlySkipPeerAs>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlySkipPeerAs>(.*)</publicAsOnlySkipPeerAs>.*', recv_xml)
if re_find:
result["public_as_only_skip_peer_as"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_skip_peer_as:
need_cfg = True
else:
need_cfg = True
route_limit = module.params['route_limit']
if route_limit:
if int(route_limit) < 1:
module.fail_json(
msg='the value of route_limit %s is out of [1 - 4294967295].' % route_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimit></routeLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimit>(.*)</routeLimit>.*', recv_xml)
if re_find:
result["route_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit:
need_cfg = True
else:
need_cfg = True
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
if int(route_limit_percent) < 1 or int(route_limit_percent) > 100:
module.fail_json(
msg='Error: The value of route_limit_percent %s is out of [1 - 100].' % route_limit_percent)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitPercent></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitPercent>(.*)</routeLimitPercent>.*', recv_xml)
if re_find:
result["route_limit_percent"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_percent:
need_cfg = True
else:
need_cfg = True
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitType></routeLimitType>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitType>(.*)</routeLimitType>.*', recv_xml)
if re_find:
result["route_limit_type"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_type:
need_cfg = True
else:
need_cfg = True
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
if int(route_limit_idle_timeout) < 1 or int(route_limit_idle_timeout) > 1200:
module.fail_json(
msg='Error: The value of route_limit_idle_timeout %s is out of '
'[1 - 1200].' % route_limit_idle_timeout)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitIdleTimeout></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitIdleTimeout>(.*)</routeLimitIdleTimeout>.*', recv_xml)
if re_find:
result["route_limit_idle_timeout"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_idle_timeout:
need_cfg = True
else:
need_cfg = True
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
if int(rt_updt_interval) < 0 or int(rt_updt_interval) > 600:
module.fail_json(
msg='Error: The value of rt_updt_interval %s is out of [0 - 600].' % rt_updt_interval)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<rtUpdtInterval></rtUpdtInterval>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<rtUpdtInterval>(.*)</rtUpdtInterval>.*', recv_xml)
if re_find:
result["rt_updt_interval"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != rt_updt_interval:
need_cfg = True
else:
need_cfg = True
redirect_ip = module.params['redirect_ip']
if redirect_ip != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIP></redirectIP>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIP>(.*)</redirectIP>.*', recv_xml)
if re_find:
result["redirect_ip"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip:
need_cfg = True
else:
need_cfg = True
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
if redirect_ip_vaildation != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIPVaildation></redirectIPVaildation>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIPVaildation>(.*)</redirectIPVaildation>.*', recv_xml)
if re_find:
result["redirect_ip_vaildation"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip_vaildation:
need_cfg = True
else:
need_cfg = True
reflect_client = module.params['reflect_client']
if reflect_client != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<reflectClient></reflectClient>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<reflectClient>(.*)</reflectClient>.*', recv_xml)
if re_find:
result["reflect_client"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != reflect_client:
need_cfg = True
else:
need_cfg = True
substitute_as_enable = module.params['substitute_as_enable']
if substitute_as_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<substituteAsEnable></substituteAsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<substituteAsEnable>(.*)</substituteAsEnable>.*', recv_xml)
if re_find:
result["substitute_as_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != substitute_as_enable:
need_cfg = True
else:
need_cfg = True
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
if len(import_rt_policy_name) < 1 or len(import_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of import_rt_policy_name %s is out of [1 - 40].' % import_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importRtPolicyName></importRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importRtPolicyName>(.*)</importRtPolicyName>.*', recv_xml)
if re_find:
result["import_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_rt_policy_name:
need_cfg = True
else:
need_cfg = True
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
if len(export_rt_policy_name) < 1 or len(export_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of export_rt_policy_name %s is out of [1 - 40].' % export_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportRtPolicyName></exportRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportRtPolicyName>(.*)</exportRtPolicyName>.*', recv_xml)
if re_find:
result["export_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_rt_policy_name:
need_cfg = True
else:
need_cfg = True
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
if len(import_pref_filt_name) < 1 or len(import_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of import_pref_filt_name %s is out of [1 - 169].' % import_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importPrefFiltName></importPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importPrefFiltName>(.*)</importPrefFiltName>.*', recv_xml)
if re_find:
result["import_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_pref_filt_name:
need_cfg = True
else:
need_cfg = True
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
if len(export_pref_filt_name) < 1 or len(export_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of export_pref_filt_name %s is out of [1 - 169].' % export_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportPrefFiltName></exportPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportPrefFiltName>(.*)</exportPrefFiltName>.*', recv_xml)
if re_find:
result["export_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_pref_filt_name:
need_cfg = True
else:
need_cfg = True
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
if int(import_as_path_filter) < 1 or int(import_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of import_as_path_filter %s is out of [1 - 256].' % import_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathFilter></importAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathFilter>(.*)</importAsPathFilter>.*', recv_xml)
if re_find:
result["import_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_filter:
need_cfg = True
else:
need_cfg = True
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
if int(export_as_path_filter) < 1 or int(export_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of export_as_path_filter %s is out of [1 - 256].' % export_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathFilter></exportAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathFilter>(.*)</exportAsPathFilter>.*', recv_xml)
if re_find:
result["export_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_filter:
need_cfg = True
else:
need_cfg = True
import_as_path_name_or_num = module.params[
'import_as_path_name_or_num']
if import_as_path_name_or_num:
if len(import_as_path_name_or_num) < 1 or len(import_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of import_as_path_name_or_num %s is out '
'of [1 - 51].' % import_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathNameOrNum></importAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathNameOrNum>(.*)</importAsPathNameOrNum>.*', recv_xml)
if re_find:
result["import_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
export_as_path_name_or_num = module.params[
'export_as_path_name_or_num']
if export_as_path_name_or_num:
if len(export_as_path_name_or_num) < 1 or len(export_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of export_as_path_name_or_num %s is out '
'of [1 - 51].' % export_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathNameOrNum></exportAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathNameOrNum>(.*)</exportAsPathNameOrNum>.*', recv_xml)
if re_find:
result["export_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
if len(import_acl_name_or_num) < 1 or len(import_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of import_acl_name_or_num %s is out of [1 - 32].' % import_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAclNameOrNum></importAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAclNameOrNum>(.*)</importAclNameOrNum>.*', recv_xml)
if re_find:
result["import_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
if len(export_acl_name_or_num) < 1 or len(export_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of export_acl_name_or_num %s is out of [1 - 32].' % export_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAclNameOrNum></exportAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAclNameOrNum>(.*)</exportAclNameOrNum>.*', recv_xml)
if re_find:
result["export_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if ipprefix_orf_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<ipprefixOrfEnable></ipprefixOrfEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ipprefixOrfEnable>(.*)</ipprefixOrfEnable>.*', recv_xml)
if re_find:
result["ipprefix_orf_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != ipprefix_orf_enable:
need_cfg = True
else:
need_cfg = True
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if is_nonstd_ipprefix_mod != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<isNonstdIpprefixMod></isNonstdIpprefixMod>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isNonstdIpprefixMod>(.*)</isNonstdIpprefixMod>.*', recv_xml)
if re_find:
result["is_nonstd_ipprefix_mod"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != is_nonstd_ipprefix_mod:
need_cfg = True
else:
need_cfg = True
orftype = module.params['orftype']
if orftype:
if int(orftype) < 0 or int(orftype) > 65535:
module.fail_json(
msg='Error: The value of orftype %s is out of [0 - 65535].' % orftype)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orftype></orftype>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orftype>(.*)</orftype>.*', recv_xml)
if re_find:
result["orftype"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orftype:
need_cfg = True
else:
need_cfg = True
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orfMode></orfMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orfMode>(.*)</orfMode>.*', recv_xml)
if re_find:
result["orf_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orf_mode:
need_cfg = True
else:
need_cfg = True
soostring = module.params['soostring']
if soostring:
if len(soostring) < 3 or len(soostring) > 21:
module.fail_json(
msg='Error: The len of soostring %s is out of [3 - 21].' % soostring)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<soostring></soostring>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<soostring>(.*)</soostring>.*', recv_xml)
if re_find:
result["soostring"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != soostring:
need_cfg = True
else:
need_cfg = True
default_rt_adv_enable = module.params['default_rt_adv_enable']
if default_rt_adv_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvEnable></defaultRtAdvEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvEnable>(.*)</defaultRtAdvEnable>.*', recv_xml)
if re_find:
result["default_rt_adv_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_enable:
need_cfg = True
else:
need_cfg = True
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
if len(default_rt_adv_policy) < 1 or len(default_rt_adv_policy) > 40:
module.fail_json(
msg='Error: The len of default_rt_adv_policy %s is out of [1 - 40].' % default_rt_adv_policy)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvPolicy></defaultRtAdvPolicy>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvPolicy>(.*)</defaultRtAdvPolicy>.*', recv_xml)
if re_find:
result["default_rt_adv_policy"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_policy:
need_cfg = True
else:
need_cfg = True
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtMatchMode></defaultRtMatchMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtMatchMode>(.*)</defaultRtMatchMode>.*', recv_xml)
if re_find:
result["default_rt_match_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_match_mode:
need_cfg = True
else:
need_cfg = True
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<addPathMode></addPathMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<addPathMode>(.*)</addPathMode>.*', recv_xml)
if re_find:
result["add_path_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != add_path_mode:
need_cfg = True
else:
need_cfg = True
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
if int(orftype) < 2 or int(orftype) > 64:
module.fail_json(
msg='Error: The value of adv_add_path_num %s is out of [2 - 64].' % adv_add_path_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advAddPathNum></advAddPathNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advAddPathNum>(.*)</advAddPathNum>.*', recv_xml)
if re_find:
result["adv_add_path_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != adv_add_path_num:
need_cfg = True
else:
need_cfg = True
origin_as_valid = module.params['origin_as_valid']
if origin_as_valid != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<originAsValid></originAsValid>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<originAsValid>(.*)</originAsValid>.*', recv_xml)
if re_find:
result["origin_as_valid"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != origin_as_valid:
need_cfg = True
else:
need_cfg = True
vpls_enable = module.params['vpls_enable']
if vpls_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsEnable></vplsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsEnable>(.*)</vplsEnable>.*', recv_xml)
if re_find:
result["vpls_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_enable:
need_cfg = True
else:
need_cfg = True
vpls_ad_disable = module.params['vpls_ad_disable']
if vpls_ad_disable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsAdDisable></vplsAdDisable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsAdDisable>(.*)</vplsAdDisable>.*', recv_xml)
if re_find:
result["vpls_ad_disable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_ad_disable:
need_cfg = True
else:
need_cfg = True
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
if update_pkt_standard_compatible != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<updatePktStandardCompatible></updatePktStandardCompatible>" + \
CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<updatePktStandardCompatible>(.*)</updatePktStandardCompatible>.*', recv_xml)
if re_find:
result["update_pkt_standard_compatible"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != update_pkt_standard_compatible:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_bgp_peer_af(self, **kwargs):
    """Merge (create-or-update) a BGP peer address family via NETCONF.

    Expects ``kwargs["module"]`` whose params supply ``vrf_name``,
    ``af_type`` and ``remote_address``.  Calls ``module.fail_json`` when
    the device does not answer with ``<ok/>``.

    Returns:
        list[str]: the equivalent CLI commands for the 'updates' output.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address) + CE_MERGE_BGP_PEER_AF_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family failed.')

    # Map the NETCONF af_type token to its CLI address-family view.
    # The original if/elif chain left `cmd` unbound for the other valid
    # af_type choices (ipv4vpn, ipv6vpn, evpn), raising UnboundLocalError;
    # for those we now simply omit the view command.
    af_view_cmds = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
    }
    cmds = []
    if af_type in af_view_cmds:
        cmds.append(af_view_cmds[af_type])

    cmds.append("peer %s" % remote_address)

    return cmds
def create_bgp_peer_af(self, **kwargs):
    """Create a BGP peer address family via NETCONF.

    Expects ``kwargs["module"]`` whose params supply ``vrf_name``,
    ``af_type`` and ``remote_address``.  Calls ``module.fail_json`` when
    the device does not answer with ``<ok/>``.

    Returns:
        list[str]: the equivalent CLI commands for the 'updates' output.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_CREATE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create bgp peer address family failed.')

    # Map the NETCONF af_type token to its CLI address-family view.
    # The original if/elif chain left `cmd` unbound for the other valid
    # af_type choices (ipv4vpn, ipv6vpn, evpn), raising UnboundLocalError;
    # for those we now simply omit the view command.
    af_view_cmds = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
    }
    cmds = []
    if af_type in af_view_cmds:
        cmds.append(af_view_cmds[af_type])

    cmds.append("peer %s" % remote_address)

    return cmds
def delete_bgp_peer_af(self, **kwargs):
    """Delete a BGP peer address family via NETCONF.

    Expects ``kwargs["module"]`` whose params supply ``vrf_name``,
    ``af_type`` and ``remote_address``.  Calls ``module.fail_json`` when
    the device does not answer with ``<ok/>``.

    Returns:
        list[str]: the equivalent CLI commands for the 'updates' output.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_DELETE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp peer address family failed.')

    # Map the NETCONF af_type token to its CLI address-family view.
    # The original if/elif chain left `cmd` unbound for the other valid
    # af_type choices (ipv4vpn, ipv6vpn, evpn), raising UnboundLocalError;
    # for those we now simply omit the view command.
    af_view_cmds = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
    }
    cmds = []
    if af_type in af_view_cmds:
        cmds.append(af_view_cmds[af_type])

    cmds.append("undo peer %s" % remote_address)

    return cmds
def merge_bgp_peer_af_other(self, **kwargs):
    """Merge the optional per-peer address-family settings via NETCONF.

    Builds a single <peerAF> merge request from every module parameter
    the user supplied (skipping 'no_use' / empty values), sends it, and
    collects the equivalent CLI commands along the way.  Calls
    ``module.fail_json`` when the device does not answer with ``<ok/>``.

    Bug fixed: the advertise_irb / advertise_arp comparisons previously
    tested against the misspelling "ture", so the positive CLI form was
    unreachable and the 'undo' form was always reported.

    Returns:
        list[str]: the equivalent CLI commands for the 'updates' output.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address)

    cmds = []

    advertise_irb = module.params['advertise_irb']
    if advertise_irb != 'no_use':
        conf_str += "<advertiseIrb>%s</advertiseIrb>" % advertise_irb
        # was: advertise_irb == "ture" (typo) — never matched
        if advertise_irb == "true":
            cmd = "peer %s advertise irb" % remote_address
        else:
            cmd = "undo peer %s advertise irb" % remote_address
        cmds.append(cmd)

    advertise_arp = module.params['advertise_arp']
    if advertise_arp != 'no_use':
        conf_str += "<advertiseArp>%s</advertiseArp>" % advertise_arp
        # was: advertise_arp == "ture" (typo) — never matched
        if advertise_arp == "true":
            cmd = "peer %s advertise arp" % remote_address
        else:
            cmd = "undo peer %s advertise arp" % remote_address
        cmds.append(cmd)

    advertise_remote_nexthop = module.params['advertise_remote_nexthop']
    if advertise_remote_nexthop != 'no_use':
        conf_str += "<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>" % advertise_remote_nexthop
        if advertise_remote_nexthop == "true":
            cmd = "peer %s advertise remote-nexthop" % remote_address
        else:
            cmd = "undo peer %s advertise remote-nexthop" % remote_address
        cmds.append(cmd)

    advertise_community = module.params['advertise_community']
    if advertise_community != 'no_use':
        conf_str += "<advertiseCommunity>%s</advertiseCommunity>" % advertise_community
        if advertise_community == "true":
            cmd = "peer %s advertise-community" % remote_address
        else:
            cmd = "undo peer %s advertise-community" % remote_address
        cmds.append(cmd)

    advertise_ext_community = module.params['advertise_ext_community']
    if advertise_ext_community != 'no_use':
        conf_str += "<advertiseExtCommunity>%s</advertiseExtCommunity>" % advertise_ext_community
        if advertise_ext_community == "true":
            cmd = "peer %s advertise-ext-community" % remote_address
        else:
            cmd = "undo peer %s advertise-ext-community" % remote_address
        cmds.append(cmd)

    discard_ext_community = module.params['discard_ext_community']
    if discard_ext_community != 'no_use':
        conf_str += "<discardExtCommunity>%s</discardExtCommunity>" % discard_ext_community
        if discard_ext_community == "true":
            cmd = "peer %s discard-ext-community" % remote_address
        else:
            cmd = "undo peer %s discard-ext-community" % remote_address
        cmds.append(cmd)

    allow_as_loop_enable = module.params['allow_as_loop_enable']
    if allow_as_loop_enable != 'no_use':
        conf_str += "<allowAsLoopEnable>%s</allowAsLoopEnable>" % allow_as_loop_enable
        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop" % remote_address
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    allow_as_loop_limit = module.params['allow_as_loop_limit']
    if allow_as_loop_limit:
        conf_str += "<allowAsLoopLimit>%s</allowAsLoopLimit>" % allow_as_loop_limit
        # NOTE(review): the CLI branch keys off allow_as_loop_enable, not
        # the limit itself — preserved as-is; confirm intent upstream.
        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop %s" % (remote_address, allow_as_loop_limit)
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes
        if keep_all_routes == "true":
            cmd = "peer %s keep-all-routes" % remote_address
        else:
            cmd = "undo peer %s keep-all-routes" % remote_address
        cmds.append(cmd)

    nexthop_configure = module.params['nexthop_configure']
    if nexthop_configure:
        conf_str += "<nextHopConfigure>%s</nextHopConfigure>" % nexthop_configure
        # 'null' is a valid choice but has no CLI equivalent.
        if nexthop_configure == "local":
            cmd = "peer %s next-hop-local" % remote_address
            cmds.append(cmd)
        elif nexthop_configure == "invariable":
            cmd = "peer %s next-hop-invariable" % remote_address
            cmds.append(cmd)

    preferred_value = module.params['preferred_value']
    if preferred_value:
        conf_str += "<preferredValue>%s</preferredValue>" % preferred_value
        cmd = "peer %s preferred-value %s" % (remote_address, preferred_value)
        cmds.append(cmd)

    public_as_only = module.params['public_as_only']
    if public_as_only != 'no_use':
        conf_str += "<publicAsOnly>%s</publicAsOnly>" % public_as_only
        if public_as_only == "true":
            cmd = "peer %s public-as-only" % remote_address
        else:
            cmd = "undo peer %s public-as-only" % remote_address
        cmds.append(cmd)

    public_as_only_force = module.params['public_as_only_force']
    if public_as_only_force != 'no_use':
        conf_str += "<publicAsOnlyForce>%s</publicAsOnlyForce>" % public_as_only_force
        if public_as_only_force == "true":
            cmd = "peer %s public-as-only force" % remote_address
        else:
            cmd = "undo peer %s public-as-only force" % remote_address
        cmds.append(cmd)

    public_as_only_limited = module.params['public_as_only_limited']
    if public_as_only_limited != 'no_use':
        conf_str += "<publicAsOnlyLimited>%s</publicAsOnlyLimited>" % public_as_only_limited
        if public_as_only_limited == "true":
            cmd = "peer %s public-as-only limited" % remote_address
        else:
            cmd = "undo peer %s public-as-only limited" % remote_address
        cmds.append(cmd)

    public_as_only_replace = module.params['public_as_only_replace']
    if public_as_only_replace != 'no_use':
        conf_str += "<publicAsOnlyReplace>%s</publicAsOnlyReplace>" % public_as_only_replace
        if public_as_only_replace == "true":
            cmd = "peer %s public-as-only force replace" % remote_address
        else:
            cmd = "undo peer %s public-as-only force replace" % remote_address
        cmds.append(cmd)

    public_as_only_skip_peer_as = module.params[
        'public_as_only_skip_peer_as']
    if public_as_only_skip_peer_as != 'no_use':
        conf_str += "<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>" % public_as_only_skip_peer_as
        if public_as_only_skip_peer_as == "true":
            cmd = "peer %s public-as-only force include-peer-as" % remote_address
        else:
            cmd = "undo peer %s public-as-only force include-peer-as" % remote_address
        cmds.append(cmd)

    route_limit = module.params['route_limit']
    if route_limit:
        conf_str += "<routeLimit>%s</routeLimit>" % route_limit
        cmd = "peer %s route-limit %s" % (remote_address, route_limit)
        cmds.append(cmd)

    route_limit_percent = module.params['route_limit_percent']
    if route_limit_percent:
        conf_str += "<routeLimitPercent>%s</routeLimitPercent>" % route_limit_percent
        # NOTE(review): interpolates route_limit even when it was not
        # supplied (renders as 'None') — preserved as-is.
        cmd = "peer %s route-limit %s %s" % (remote_address, route_limit, route_limit_percent)
        cmds.append(cmd)

    route_limit_type = module.params['route_limit_type']
    if route_limit_type:
        conf_str += "<routeLimitType>%s</routeLimitType>" % route_limit_type
        # 'noparameter' is a valid choice but has no CLI suffix.
        if route_limit_type == "alertOnly":
            cmd = "peer %s route-limit %s %s alert-only" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleForever":
            cmd = "peer %s route-limit %s %s idle-forever" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleTimeout":
            cmd = "peer %s route-limit %s %s idle-timeout" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)

    route_limit_idle_timeout = module.params['route_limit_idle_timeout']
    if route_limit_idle_timeout:
        conf_str += "<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>" % route_limit_idle_timeout
        cmd = "peer %s route-limit %s %s idle-timeout %s" % (remote_address, route_limit,
                                                             route_limit_percent, route_limit_idle_timeout)
        cmds.append(cmd)

    rt_updt_interval = module.params['rt_updt_interval']
    if rt_updt_interval:
        conf_str += "<rtUpdtInterval>%s</rtUpdtInterval>" % rt_updt_interval
        cmd = "peer %s route-update-interval %s" % (remote_address, rt_updt_interval)
        cmds.append(cmd)

    # The next three settings are merged over NETCONF only; no CLI
    # command is reported for them (matches the original behavior).
    redirect_ip = module.params['redirect_ip']
    if redirect_ip != 'no_use':
        conf_str += "<redirectIP>%s</redirectIP>" % redirect_ip

    redirect_ip_vaildation = module.params['redirect_ip_vaildation']
    if redirect_ip_vaildation != 'no_use':
        conf_str += "<redirectIPVaildation>%s</redirectIPVaildation>" % redirect_ip_vaildation

    reflect_client = module.params['reflect_client']
    if reflect_client != 'no_use':
        conf_str += "<reflectClient>%s</reflectClient>" % reflect_client
        if reflect_client == "true":
            cmd = "peer %s reflect-client" % remote_address
        else:
            cmd = "undo peer %s reflect-client" % remote_address
        cmds.append(cmd)

    substitute_as_enable = module.params['substitute_as_enable']
    if substitute_as_enable != 'no_use':
        conf_str += "<substituteAsEnable>%s</substituteAsEnable>" % substitute_as_enable

    import_rt_policy_name = module.params['import_rt_policy_name']
    if import_rt_policy_name:
        conf_str += "<importRtPolicyName>%s</importRtPolicyName>" % import_rt_policy_name
        cmd = "peer %s route-policy %s import" % (remote_address, import_rt_policy_name)
        cmds.append(cmd)

    export_rt_policy_name = module.params['export_rt_policy_name']
    if export_rt_policy_name:
        conf_str += "<exportRtPolicyName>%s</exportRtPolicyName>" % export_rt_policy_name
        cmd = "peer %s route-policy %s export" % (remote_address, export_rt_policy_name)
        cmds.append(cmd)

    import_pref_filt_name = module.params['import_pref_filt_name']
    if import_pref_filt_name:
        conf_str += "<importPrefFiltName>%s</importPrefFiltName>" % import_pref_filt_name
        cmd = "peer %s filter-policy %s import" % (remote_address, import_pref_filt_name)
        cmds.append(cmd)

    export_pref_filt_name = module.params['export_pref_filt_name']
    if export_pref_filt_name:
        conf_str += "<exportPrefFiltName>%s</exportPrefFiltName>" % export_pref_filt_name
        cmd = "peer %s filter-policy %s export" % (remote_address, export_pref_filt_name)
        cmds.append(cmd)

    import_as_path_filter = module.params['import_as_path_filter']
    if import_as_path_filter:
        conf_str += "<importAsPathFilter>%s</importAsPathFilter>" % import_as_path_filter
        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_filter)
        cmds.append(cmd)

    export_as_path_filter = module.params['export_as_path_filter']
    if export_as_path_filter:
        conf_str += "<exportAsPathFilter>%s</exportAsPathFilter>" % export_as_path_filter
        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_filter)
        cmds.append(cmd)

    import_as_path_name_or_num = module.params[
        'import_as_path_name_or_num']
    if import_as_path_name_or_num:
        conf_str += "<importAsPathNameOrNum>%s</importAsPathNameOrNum>" % import_as_path_name_or_num
        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_name_or_num)
        cmds.append(cmd)

    export_as_path_name_or_num = module.params[
        'export_as_path_name_or_num']
    if export_as_path_name_or_num:
        conf_str += "<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>" % export_as_path_name_or_num
        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_name_or_num)
        cmds.append(cmd)

    import_acl_name_or_num = module.params['import_acl_name_or_num']
    if import_acl_name_or_num:
        conf_str += "<importAclNameOrNum>%s</importAclNameOrNum>" % import_acl_name_or_num
        cmd = "peer %s filter-policy %s import" % (remote_address, import_acl_name_or_num)
        cmds.append(cmd)

    export_acl_name_or_num = module.params['export_acl_name_or_num']
    if export_acl_name_or_num:
        conf_str += "<exportAclNameOrNum>%s</exportAclNameOrNum>" % export_acl_name_or_num
        cmd = "peer %s filter-policy %s export" % (remote_address, export_acl_name_or_num)
        cmds.append(cmd)

    ipprefix_orf_enable = module.params['ipprefix_orf_enable']
    if ipprefix_orf_enable != 'no_use':
        conf_str += "<ipprefixOrfEnable>%s</ipprefixOrfEnable>" % ipprefix_orf_enable
        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix" % remote_address
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix" % remote_address
        cmds.append(cmd)

    is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
    if is_nonstd_ipprefix_mod != 'no_use':
        conf_str += "<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>" % is_nonstd_ipprefix_mod
        if is_nonstd_ipprefix_mod == "true":
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf non-standard-compatible" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf non-standard-compatible" % remote_address
            cmds.append(cmd)
        else:
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf" % remote_address
            cmds.append(cmd)

    orftype = module.params['orftype']
    if orftype:
        conf_str += "<orftype>%s</orftype>" % orftype

    orf_mode = module.params['orf_mode']
    if orf_mode:
        conf_str += "<orfMode>%s</orfMode>" % orf_mode
        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        cmds.append(cmd)

    soostring = module.params['soostring']
    if soostring:
        conf_str += "<soostring>%s</soostring>" % soostring
        cmd = "peer %s soo %s" % (remote_address, soostring)
        cmds.append(cmd)

    # The default-route-advertise command is assembled incrementally from
    # up to three parameters, then appended once at the end.
    cmd = ""
    default_rt_adv_enable = module.params['default_rt_adv_enable']
    if default_rt_adv_enable != 'no_use':
        conf_str += "<defaultRtAdvEnable>%s</defaultRtAdvEnable>" % default_rt_adv_enable
        if default_rt_adv_enable == "true":
            cmd += "peer %s default-route-advertise" % remote_address
        else:
            cmd += "undo peer %s default-route-advertise" % remote_address

    default_rt_adv_policy = module.params['default_rt_adv_policy']
    if default_rt_adv_policy:
        conf_str += "<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>" % default_rt_adv_policy
        cmd += " route-policy %s" % default_rt_adv_policy

    default_rt_match_mode = module.params['default_rt_match_mode']
    if default_rt_match_mode:
        conf_str += "<defaultRtMatchMode>%s</defaultRtMatchMode>" % default_rt_match_mode
        if default_rt_match_mode == "matchall":
            cmd += " conditional-route-match-all"
        elif default_rt_match_mode == "matchany":
            cmd += " conditional-route-match-any"

    if cmd:
        cmds.append(cmd)

    # Remaining settings are merged over NETCONF only (no CLI echo),
    # matching the original behavior.
    add_path_mode = module.params['add_path_mode']
    if add_path_mode:
        conf_str += "<addPathMode>%s</addPathMode>" % add_path_mode

    adv_add_path_num = module.params['adv_add_path_num']
    if adv_add_path_num:
        conf_str += "<advAddPathNum>%s</advAddPathNum>" % adv_add_path_num

    origin_as_valid = module.params['origin_as_valid']
    if origin_as_valid != 'no_use':
        conf_str += "<originAsValid>%s</originAsValid>" % origin_as_valid

    vpls_enable = module.params['vpls_enable']
    if vpls_enable != 'no_use':
        conf_str += "<vplsEnable>%s</vplsEnable>" % vpls_enable

    vpls_ad_disable = module.params['vpls_ad_disable']
    if vpls_ad_disable != 'no_use':
        conf_str += "<vplsAdDisable>%s</vplsAdDisable>" % vpls_ad_disable

    update_pkt_standard_compatible = module.params[
        'update_pkt_standard_compatible']
    if update_pkt_standard_compatible != 'no_use':
        conf_str += "<updatePktStandardCompatible>%s</updatePktStandardCompatible>" % update_pkt_standard_compatible

    conf_str += CE_MERGE_BGP_PEER_AF_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family other failed.')

    return cmds
def main():
    """Ansible entry point: manage BGP neighbor address-family options.

    Builds the argument spec, records the proposed configuration from the
    playbook parameters, queries the current device state, merges (state
    'present') or deletes (state 'absent') the BGP peer address-family
    configuration, and exits with the usual result dictionary
    (changed / proposed / existing / end_state / updates).
    """

    def _tristate_spec():
        # Spec shared by every on/off option that also accepts 'no_use'
        # ("leave this setting untouched on the device").
        return dict(type='str', default='no_use',
                    choices=['no_use', 'true', 'false'])

    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        vrf_name=dict(type='str', required=True),
        af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn',
                              'ipv6uni', 'ipv6vpn', 'evpn'], required=True),
        remote_address=dict(type='str', required=True),
        advertise_irb=_tristate_spec(),
        advertise_arp=_tristate_spec(),
        advertise_remote_nexthop=_tristate_spec(),
        advertise_community=_tristate_spec(),
        advertise_ext_community=_tristate_spec(),
        discard_ext_community=_tristate_spec(),
        allow_as_loop_enable=_tristate_spec(),
        allow_as_loop_limit=dict(type='str'),
        keep_all_routes=_tristate_spec(),
        nexthop_configure=dict(choices=['null', 'local', 'invariable']),
        preferred_value=dict(type='str'),
        public_as_only=_tristate_spec(),
        public_as_only_force=_tristate_spec(),
        public_as_only_limited=_tristate_spec(),
        public_as_only_replace=_tristate_spec(),
        public_as_only_skip_peer_as=_tristate_spec(),
        route_limit=dict(type='str'),
        route_limit_percent=dict(type='str'),
        route_limit_type=dict(
            choices=['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']),
        route_limit_idle_timeout=dict(type='str'),
        rt_updt_interval=dict(type='str'),
        redirect_ip=_tristate_spec(),
        # NOTE(review): 'vaildation' is a typo, but it is the published
        # option name -- renaming it would break existing playbooks.
        redirect_ip_vaildation=_tristate_spec(),
        reflect_client=_tristate_spec(),
        substitute_as_enable=_tristate_spec(),
        import_rt_policy_name=dict(type='str'),
        export_rt_policy_name=dict(type='str'),
        import_pref_filt_name=dict(type='str'),
        export_pref_filt_name=dict(type='str'),
        import_as_path_filter=dict(type='str'),
        export_as_path_filter=dict(type='str'),
        import_as_path_name_or_num=dict(type='str'),
        export_as_path_name_or_num=dict(type='str'),
        import_acl_name_or_num=dict(type='str'),
        export_acl_name_or_num=dict(type='str'),
        ipprefix_orf_enable=_tristate_spec(),
        is_nonstd_ipprefix_mod=_tristate_spec(),
        orftype=dict(type='str'),
        orf_mode=dict(choices=['null', 'receive', 'send', 'both']),
        soostring=dict(type='str'),
        default_rt_adv_enable=_tristate_spec(),
        default_rt_adv_policy=dict(type='str'),
        default_rt_match_mode=dict(choices=['null', 'matchall', 'matchany']),
        add_path_mode=dict(choices=['null', 'receive', 'send', 'both']),
        adv_add_path_num=dict(type='str'),
        origin_as_valid=_tristate_spec(),
        vpls_enable=_tristate_spec(),
        vpls_ad_disable=_tristate_spec(),
        update_pkt_standard_compatible=_tristate_spec())

    # Names using the tristate scheme: reported in 'proposed' whenever
    # they differ from 'no_use'.  Computed before the connection options
    # from ce_argument_spec are merged in.
    tristate_names = frozenset(
        name for name, spec in argument_spec.items()
        if spec.get('default') == 'no_use')

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    state = module.params['state']

    # Reporting order of 'proposed' (kept identical to the original
    # hand-written sequence for stable module output).
    reported_names = (
        'vrf_name', 'af_type', 'remote_address',
        'advertise_irb', 'advertise_arp', 'advertise_remote_nexthop',
        'advertise_community', 'advertise_ext_community',
        'discard_ext_community', 'allow_as_loop_enable',
        'allow_as_loop_limit', 'keep_all_routes', 'nexthop_configure',
        'preferred_value', 'public_as_only', 'public_as_only_force',
        'public_as_only_limited', 'public_as_only_replace',
        'public_as_only_skip_peer_as', 'route_limit',
        'route_limit_percent', 'route_limit_type',
        'route_limit_idle_timeout', 'rt_updt_interval', 'redirect_ip',
        'redirect_ip_vaildation', 'reflect_client',
        'substitute_as_enable', 'import_rt_policy_name',
        'export_rt_policy_name', 'import_pref_filt_name',
        'export_pref_filt_name', 'import_as_path_filter',
        'export_as_path_filter', 'import_as_path_name_or_num',
        'export_as_path_name_or_num', 'import_acl_name_or_num',
        'export_acl_name_or_num', 'ipprefix_orf_enable',
        'is_nonstd_ipprefix_mod', 'orftype', 'orf_mode', 'soostring',
        'default_rt_adv_enable', 'default_rt_adv_policy',
        'default_rt_match_mode', 'add_path_mode', 'adv_add_path_num',
        'origin_as_valid', 'vpls_enable', 'vpls_ad_disable',
        'update_pkt_standard_compatible')

    ce_bgp_peer_af_obj = BgpNeighborAf()
    if not ce_bgp_peer_af_obj:
        module.fail_json(msg='Error: Init module failed.')

    # get proposed: tristate options count when set, others when truthy.
    proposed["state"] = state
    for name in reported_names:
        value = module.params[name]
        if name in tristate_names:
            if value != 'no_use':
                proposed[name] = value
        elif value:
            proposed[name] = value

    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)

    # state exist bgp peer address family config
    exist_tmp = dict()
    for item in bgp_peer_af_rst:
        if item != "need_cfg":
            exist_tmp[item] = bgp_peer_af_rst[item]
    if exist_tmp:
        existing["bgp neighbor af"] = exist_tmp
    # state exist bgp peer address family other config
    exist_tmp = dict()
    for item in bgp_peer_af_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = bgp_peer_af_other_rst[item]
    if exist_tmp:
        existing["bgp neighbor af other"] = exist_tmp

    if state == "present":
        if bgp_peer_af_rst["need_cfg"]:
            # An existing peer AF is merged; otherwise it is created.
            if "remote_address" in bgp_peer_af_rst.keys():
                cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af(module=module)
            else:
                cmd = ce_bgp_peer_af_obj.create_bgp_peer_af(module=module)
            changed = True
            updates.extend(cmd)
        if bgp_peer_af_other_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af_other(module=module)
            changed = True
            updates.extend(cmd)
    else:
        if bgp_peer_af_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.delete_bgp_peer_af(module=module)
            changed = True
            updates.extend(cmd)
        if bgp_peer_af_other_rst["need_cfg"]:
            # Deleting the peer AF already removes the "other" settings.
            pass

    # state end bgp peer address family config
    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    end_tmp = dict()
    for item in bgp_peer_af_rst:
        if item != "need_cfg":
            end_tmp[item] = bgp_peer_af_rst[item]
    if end_tmp:
        end_state["bgp neighbor af"] = end_tmp
    # state end bgp peer address family other config
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)
    end_tmp = dict()
    for item in bgp_peer_af_other_rst:
        if item != "need_cfg":
            end_tmp[item] = bgp_peer_af_other_rst[item]
    if end_tmp:
        end_state["bgp neighbor af other"] = end_tmp

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
kevin-intel/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 10 | 16467 | """
Testing Recursive feature elimination
"""
from operator import attrgetter
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.feature_selection import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR, LinearSVR
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GroupKFold
from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
from sklearn.utils._testing import ignore_warnings
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier:
    """Minimal estimator stub used to exercise RFE/RFECV machinery.

    On fit it records a uniform ``coef_`` vector, reports a constant
    score, and advertises NaN support, so feature-selection logic can
    run without doing any real learning.
    """

    def __init__(self, foo_param=0):
        # Single dummy hyper-parameter so get_params/set_params round-trip.
        self.foo_param = foo_param

    def fit(self, X, y):
        assert len(X) == len(y)
        # Every feature gets weight 1.0, so elimination order is driven
        # purely by RFE's own tie-breaking.
        self.coef_ = np.full(X.shape[1], 1.0, dtype=np.float64)
        return self

    def predict(self, T):
        # Returns the number of samples, whatever the input content.
        return T.shape[0]

    # All prediction-like entry points share the same behavior.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, y=None):
        return 0.

    def get_params(self, deep=True):
        return dict(foo_param=self.foo_param)

    def set_params(self, **params):
        return self

    def _more_tags(self):
        return dict(allow_nan=True)
def test_rfe_features_importance():
    """RFE driven by feature_importances_ must agree with RFE driven by coef_."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    forest = RandomForestClassifier(n_estimators=20,
                                    random_state=rng, max_depth=2)
    rfe = RFE(estimator=forest, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    assert len(rfe.ranking_) == X.shape[1]

    svc = SVC(kernel="linear")
    rfe_svc = RFE(estimator=svc, n_features_to_select=4, step=0.1)
    rfe_svc.fit(X, y)

    # Both selectors must pick the same support mask.
    assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
    """RFE behaves identically on dense and sparse inputs."""
    rng = check_random_state(0)
    iris = load_iris()
    X_dense = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X_dense)
    y = iris.target

    # Dense path: fit the selector, then refit the estimator on the
    # reduced matrix for the comparison below.
    svc = SVC(kernel="linear")
    rfe_dense = RFE(estimator=svc, n_features_to_select=4, step=0.1)
    rfe_dense.fit(X_dense, y)
    X_r = rfe_dense.transform(X_dense)
    svc.fit(X_r, y)
    assert len(rfe_dense.ranking_) == X_dense.shape[1]

    # Sparse path.
    rfe_sparse = RFE(estimator=SVC(kernel="linear"),
                     n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    # The six noise columns are dropped: only iris data remains.
    assert X_r.shape == iris.data.shape
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    assert_array_almost_equal(rfe_dense.predict(X_dense),
                              svc.predict(iris.data))
    assert rfe_dense.score(X_dense, y) == svc.score(iris.data, iris.target)
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
@pytest.mark.parametrize("n_features_to_select", [-1, 2.1])
def test_rfe_invalid_n_features_errors(n_features_to_select):
    """Out-of-range n_features_to_select raises ValueError at fit time."""
    iris = load_iris()
    selector = RFE(estimator=SVC(kernel="linear"),
                   n_features_to_select=n_features_to_select,
                   step=0.1)
    expected = f"n_features_to_select must be .+ Got {n_features_to_select}"
    with pytest.raises(ValueError, match=expected):
        selector.fit(iris.data, iris.target)
def test_rfe_percent_n_features():
    """A fractional n_features_to_select matches the integer equivalent."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    def fit_selector(n_select):
        # Fresh selector per call so the two fits are independent.
        return RFE(estimator=SVC(kernel="linear"),
                   n_features_to_select=n_select, step=0.1).fit(X, y)

    # 10 features in the data, so selecting 40% equals selecting 4.
    by_count = fit_selector(4)
    by_fraction = fit_selector(0.4)

    assert_array_equal(by_fraction.ranking_, by_count.ranking_)
    assert_array_equal(by_fraction.support_, by_count.support_)
def test_rfe_mockclassifier():
    """RFE runs with a bare-bones estimator exposing only fit and coef_."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    mock = MockClassifier()
    selector = RFE(estimator=mock, n_features_to_select=4, step=0.1)
    selector.fit(X, y)
    reduced = selector.transform(X)
    mock.fit(reduced, y)

    assert len(selector.ranking_) == X.shape[1]
    assert reduced.shape == iris.data.shape
def test_rfecv():
    """End-to-end checks of RFECV on iris data augmented with noise features.

    Covers: dense and sparse inputs, a custom loss via make_scorer, a
    named scorer, a constant scorer (score ties keep the fewest
    features), step=2, and fractional step values.
    """
    generator = check_random_state(0)
    iris = load_iris()
    # 4 informative iris columns + 6 pure-noise columns.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target) # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert len(rfecv.grid_scores_) == X.shape[1]
    assert len(rfecv.ranking_) == X.shape[1]
    X_r = rfecv.transform(X)
    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test fix on grid_scores
    def test_scorer(estimator, X, y):
        # Constant score, so every candidate feature count ties.
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
    # In the event of cross validation score ties, the expected behavior of
    # RFECV is to return the FEWEST features that maximize the CV score.
    # Because test_scorer always returns 1.0 in this example, RFECV should
    # reduce the dimensionality to a single feature (i.e. n_features_ = 1)
    assert rfecv.n_features_ == 1
    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
    rfecv.fit(X, y)
    assert len(rfecv.grid_scores_) == 6
    assert len(rfecv.ranking_) == X.shape[1]
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Verifying that steps < 1 don't blow up.
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    """RFECV works with a bare-bones estimator and a list target."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list targets are supported

    selector = RFECV(estimator=MockClassifier(), step=1)
    selector.fit(X, y)

    # Non-regression check: the worst feature is not silently dropped.
    assert len(selector.grid_scores_) == X.shape[1]
    assert len(selector.ranking_) == X.shape[1]
def test_rfecv_verbose_output():
    """Check verbose=1 produces output on stdout.

    Bug fix: the original replaced ``sys.stdout`` with a StringIO and
    never restored it, leaking the redirection into every subsequently
    run test (and losing it entirely if the fit raised).  The capture is
    now wrapped in try/finally so stdout is always restored.
    """
    from io import StringIO
    import sys

    captured = StringIO()
    original_stdout = sys.stdout
    sys.stdout = captured
    try:
        generator = check_random_state(0)
        iris = load_iris()
        X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
        y = list(iris.target)

        rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1)
        rfecv.fit(X, y)
    finally:
        sys.stdout = original_stdout

    captured.seek(0)
    # At least one line of progress output must have been printed.
    assert len(captured.readline()) > 0
def test_rfecv_grid_scores_size():
    """grid_scores_ length follows from step and min_features_to_select."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list targets are supported

    # Non-regression test for varying combinations of step and
    # min_features_to_select.
    for step, min_features in ((2, 1), (2, 2), (3, 3)):
        selector = RFECV(estimator=MockClassifier(), step=step,
                         min_features_to_select=min_features)
        selector.fit(X, y)

        expected_len = np.ceil((X.shape[1] - min_features) / step) + 1
        assert len(selector.grid_scores_) == expected_len
        assert len(selector.ranking_) == X.shape[1]
        assert selector.n_features_ >= min_features
def test_rfe_estimator_tags():
    """RFE exposes the wrapped estimator's type so CV can stratify."""
    selector = RFE(SVC(kernel='linear'))
    assert selector._estimator_type == "classifier"

    # Being recognized as a classifier makes cross_val_score stratify.
    iris = load_iris()
    scores = cross_val_score(selector, iris.data, iris.target)
    assert scores.min() > .7
def test_rfe_min_step():
    """Steps that floor to zero are clamped so elimination still proceeds."""
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    estimator = SVR(kernel="linear")

    # Three equivalent settings: a fraction flooring to 0 (clamped to 1),
    # a fraction flooring to a positive count, and a plain integer.
    for step in (0.01, 0.20, 5):
        fitted = RFE(estimator, step=step).fit(X, y)
        assert fitted.support_.sum() == n_features // 2
def test_number_of_subsets_of_features():
    """The two closed forms for the RFE iteration count must agree."""
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # This test case is to test their equivalence, refer to #4534 and #3824
    def formula1(n_features, n_features_to_select, step):
        # Pre-#4534 integer-arithmetic form.
        return 1 + ((n_features + step - n_features_to_select - 1) // step)
    def formula2(n_features, n_features_to_select, step):
        # Post-#4534 ceiling form.
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # RFE
    # Case 1, n_features - n_features_to_select is divisible by step
    # Case 2, n_features - n_features_to_select is not divisible by step
    n_features_list = [11, 11]
    n_features_to_select_list = [3, 3]
    step_list = [2, 3]
    for n_features, n_features_to_select, step in zip(
            n_features_list, n_features_to_select_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_features_to_select, step=step)
        rfe.fit(X, y)
        # this number also equals to the maximum of ranking_
        assert (np.max(rfe.ranking_) ==
                formula1(n_features, n_features_to_select, step))
        assert (np.max(rfe.ranking_) ==
                formula2(n_features, n_features_to_select, step))
    # In RFECV, 'fit' calls 'RFE._fit'
    # 'number_of_subsets_of_features' of RFE
    # = the size of 'grid_scores' of RFECV
    # = the number of iterations of the for loop before optimization #4534
    # RFECV, n_features_to_select = 1
    # Case 1, n_features - 1 is divisible by step
    # Case 2, n_features - 1 is not divisible by step
    n_features_to_select = 1
    n_features_list = [11, 10]
    step_list = [2, 2]
    for n_features, step in zip(n_features_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step)
        rfecv.fit(X, y)
        assert (rfecv.grid_scores_.shape[0] ==
                formula1(n_features, n_features_to_select, step))
        assert (rfecv.grid_scores_.shape[0] ==
                formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
    """Parallel RFECV (n_jobs=2) matches the sequential result."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    selector = RFECV(estimator=SVC(kernel='linear'))
    selector.fit(X, y)
    sequential_ranking = selector.ranking_
    sequential_scores = selector.grid_scores_

    # Refit in parallel on the same data and compare.
    selector.set_params(n_jobs=2)
    selector.fit(X, y)
    assert_array_almost_equal(selector.ranking_, sequential_ranking)
    assert_array_almost_equal(selector.grid_scores_, sequential_scores)
def test_rfe_cv_groups():
    """RFECV forwards its groups argument to a group-aware CV splitter."""
    rng = check_random_state(0)
    iris = load_iris()
    n_groups = 4
    group_labels = np.floor(np.linspace(0, n_groups, len(iris.target)))
    X = iris.data
    y = (iris.target > 0).astype(int)

    selector = RFECV(
        estimator=RandomForestClassifier(random_state=rng),
        step=1,
        scoring='accuracy',
        cv=GroupKFold(n_splits=2),
    )
    # Passing groups through fit must not raise and must select features.
    selector.fit(X, y, groups=group_labels)
    assert selector.n_features_ > 0
@pytest.mark.parametrize(
    'importance_getter',
    [attrgetter('regressor_.coef_'), 'regressor_.coef_'])
@pytest.mark.parametrize('selector, expected_n_features',
                         [(RFE, 5), (RFECV, 4)])
def test_rfe_wrapped_estimator(importance_getter, selector,
                               expected_n_features):
    """Importances can be fetched through a wrapping meta-estimator.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/15312
    """
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    wrapped = TransformedTargetRegressor(regressor=LinearSVR(random_state=0),
                                         func=np.log,
                                         inverse_func=np.exp)

    fitted = selector(wrapped, importance_getter=importance_getter).fit(X, y)
    assert fitted.support_.sum() == expected_n_features
@pytest.mark.parametrize(
    "importance_getter, err_type",
    [("auto", ValueError),
     ("random", AttributeError),
     (lambda x: x.importance, AttributeError),
     ([0], ValueError)]
)
@pytest.mark.parametrize("Selector", [RFE, RFECV])
def test_rfe_importance_getter_validation(importance_getter, err_type,
                                          Selector):
    """Invalid importance_getter values raise the documented error at fit."""
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=42)
    wrapped = TransformedTargetRegressor(
        regressor=LinearSVR(), func=np.log, inverse_func=np.exp
    )

    with pytest.raises(err_type):
        Selector(wrapped, importance_getter=importance_getter).fit(X, y)
@pytest.mark.parametrize("cv", [None, 5])
def test_rfe_allow_nan_inf_in_x(cv):
    """RFE/RFECV accept non-finite X when the estimator allows it.

    Bug fix: replaced the ``np.NaN`` / ``np.Inf`` aliases with the
    canonical lowercase names — the capitalized aliases were removed in
    NumPy 2.0 and would make this test crash on import of the values.
    """
    iris = load_iris()
    X = iris.data
    y = iris.target

    # Poison X with non-finite values; MockClassifier advertises
    # allow_nan via _more_tags, so input validation must not reject it.
    X[0][0] = np.nan
    X[0][1] = np.inf

    clf = MockClassifier()
    if cv is not None:
        rfe = RFECV(estimator=clf, cv=cv)
    else:
        rfe = RFE(estimator=clf)
    rfe.fit(X, y)
    rfe.transform(X)
def test_w_pipeline_2d_coef_():
    """A dotted importance_getter path reaches into pipeline steps."""
    data, y = load_iris(return_X_y=True)
    estimator = make_pipeline(StandardScaler(), LogisticRegression())

    selector = RFE(estimator, n_features_to_select=2,
                   importance_getter='named_steps.logisticregression.coef_')
    selector.fit(data, y)
    assert selector.transform(data).shape[1] == 2
@pytest.mark.parametrize('ClsRFE', [
    RFE,
    RFECV
])
def test_multioutput(ClsRFE):
    """RFE and RFECV accept 2-D (multi-output) targets without raising."""
    X = np.random.normal(size=(10, 3))
    y = np.random.randint(2, size=(10, 2))
    selector = ClsRFE(RandomForestClassifier(n_estimators=5))
    selector.fit(X, y)
| bsd-3-clause |
Mako-kun/mangaki | mangaki/mangaki/utils/svd.py | 2 | 5410 | from django.contrib.auth.models import User
from mangaki.models import Rating, Work, Recommendation
from mangaki.utils.chrono import Chrono
from mangaki.utils.values import rating_values
from scipy.sparse import lil_matrix
from sklearn.utils.extmath import randomized_svd
import numpy as np
from django.db import connection
import pickle
import json
import math
NB_COMPONENTS = 10
TOP = 10
class MangakiSVD(object):
M = None
U = None
sigma = None
VT = None
chrono = None
inv_work = None
inv_user = None
work_titles = None
def __init__(self):
self.chrono = Chrono(True)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
def load(self, filename):
with open(filename, 'rb') as f:
backup = pickle.load(f)
self.M = backup.M
self.U = backup.U
self.sigma = backup.sigma
self.VT = backup.VT
self.inv_work = backup.inv_work
self.inv_user = backup.inv_user
self.work_titles = backup.work_titles
def fit(self, X, y):
self.work_titles = {}
for work in Work.objects.values('id', 'title'):
self.work_titles[work['id']] = work['title']
work_ids = list(Rating.objects.values_list('work_id', flat=True).distinct())
nb_works = len(work_ids)
self.inv_work = {work_ids[i]: i for i in range(nb_works)}
user_ids = list(User.objects.values_list('id', flat=True))
nb_users = len(user_ids)
self.inv_user = {user_ids[i]: i for i in range(nb_users)}
self.chrono.save('get_work_ids')
# print("Computing M: (%i × %i)" % (nb_users, nb_works))
self.M = lil_matrix((nb_users, nb_works))
"""ratings_of = {}
for (user_id, work_id), rating in zip(X, y):
ratings_of.setdefault(user_id, []).append(rating)"""
for (user_id, work_id), rating in zip(X, y):
self.M[self.inv_user[user_id], self.inv_work[work_id]] = rating #- np.mean(ratings_of[user_id])
# np.save('backupM', self.M)
self.chrono.save('fill matrix')
# Ranking computation
self.U, self.sigma, self.VT = randomized_svd(self.M, NB_COMPONENTS, n_iter=3, random_state=42)
# print('Formes', self.U.shape, self.sigma.shape, self.VT.shape)
self.save('backup.pickle')
self.chrono.save('factor matrix')
def predict(self, X):
y = []
for user_id, work_id in X:
i = self.inv_user[user_id]
j = self.inv_work[work_id]
y.append(self.U[i].dot(np.diag(self.sigma)).dot(self.VT.transpose()[j]))
return np.array(y)
def get_reco(self, username, sending=False):
target_user = User.objects.get(username=username)
the_user_id = target_user.id
svd_user = User.objects.get(username='svd')
work_ids = {self.inv_work[work_id]: work_id for work_id in self.inv_work}
nb_works = len(work_ids)
seen_works = set(Rating.objects.filter(user__id=the_user_id).exclude(choice='willsee').values_list('work_id', flat=True))
the_i = self.inv_user[the_user_id]
self.chrono.save('get_seen_works')
print('mon vecteur (taille %d)' % len(self.U[the_i]), self.U[the_i])
print(self.sigma)
for i, line in enumerate(self.VT):
print('=> Ligne %d' % (i + 1), '(ma note : %f)' % self.U[the_i][i])
sorted_line = sorted((line[j], self.work_titles[work_ids[j]]) for j in range(nb_works))[::-1]
top5 = sorted_line[:10]
bottom5 = sorted_line[-10:]
for anime in top5:
print(anime)
for anime in bottom5:
print(anime)
"""if i == 0 or i == 1: # First two vectors explaining variance
with open('vector%d.json' % (i + 1), 'w') as f:
vi = X.dot(line).tolist()
x_norm = [np.dot(X.data[k], X.data[k]) / (nb_works + 1) for k in range(nb_users + 1)]
f.write(json.dumps({'v': [v / math.sqrt(x_norm[k]) if x_norm[k] != 0 else float('inf') for k, v in enumerate(vi)]}))"""
# print(VT.dot(VT.transpose()))
# return
the_ratings = self.predict((the_user_id, work_ids[j]) for j in range(nb_works))
ranking = sorted(zip(the_ratings, [(work_ids[j], self.work_titles[work_ids[j]]) for j in range(nb_works)]), reverse=True)
# Summarize the results of the ranking for the_user_id:
# “=> rank, title, score”
c = 0
for i, (rating, (work_id, title)) in enumerate(ranking, start=1):
if work_id not in seen_works:
print('=>', i, title, rating, self.predict([(the_user_id, work_id)]))
if Recommendation.objects.filter(user=svd_user, target_user__id=the_user_id, work__id=work_id).count() == 0:
Recommendation.objects.create(user=svd_user, target_user_id=the_user_id, work_id=work_id)
c += 1
elif i < TOP:
print(i, title, rating)
if c >= TOP:
break
"""print(len(connection.queries), 'queries')
for line in connection.queries:
print(line)"""
self.chrono.save('complete')
def __str__(self):
    """Display tag identifying this recommendation algorithm."""
    return '[SVD]'
def get_shortname(self):
    """Short machine-readable identifier for this algorithm."""
    return 'svd'
| agpl-3.0 |
mikelum/pyspeckit | pyspeckit/mpfit/mpfitexpr.py | 11 | 2384 | """
Copyright (C) 2009 Sergey Koposov
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import mpfit
import re
import numpy
def mpfitexpr(func, x, y, err, start_params, check=True, full_output=False, **kw):
    """Fit the user defined expression to the data

    Input:
    - func: string with the function definition
    - x: x vector
    - y: y vector
    - err: vector with the errors of y
    - start_params: the starting parameters for the fit

    Output:
    - The tuple (params, yfit) with best-fit params and the values of func evaluated at x

    Keywords:
    - check: boolean parameter. If true(default) the function will be checked for sanity
    - full_output: boolean parameter. If True(default is False) then instead of best-fit parameters the mpfit object is returned

    Example:
    params,yfit=mpfitexpr('p[0]+p[2]*(x-p[1])',x,y,err,[0,10,1])

    If you need to use numpy functions in your function, then
    you must to use the full names of these functions, e.g.:
    numpy.sin, numpy.cos etc.

    This function is motivated by mpfitexpr() from wonderful MPFIT IDL package
    written by Craig Markwardt
    """

    def myfunc(p, fjac=None, x=None, y=None, err=None):
        # Error-weighted residuals; mpfit minimizes their sum of squares.
        # The leading 0 is the mpfit status flag.
        return [0, eval('(y-(%s))/err' % func)]

    # Find every parameter reference "p[<n>]" in the expression.
    # Bug fix: the old pattern "[^a-zA-Z]p\[(\d+)\]" *consumed* a
    # character before "p", so a reference at the very start of the
    # string (e.g. "p[0]") was missed.  A zero-width lookbehind matches
    # the same references plus the start-of-string case.
    myre = r"(?<![a-zA-Z])p\[(\d+)\]"
    r = re.compile(myre)
    maxp = -1
    for m in re.finditer(r, func):
        curp = int(m.group(1))
        maxp = curp if curp > maxp else maxp
    if check:
        if maxp == -1:
            raise Exception("wrong function format")
        if maxp + 1 != len(start_params):
            # typo fix: "verctor" -> "vector"
            raise Exception("the length of the start_params != the length of the parameter vector of the function")
    fa = {'x': x, 'y': y, 'err': err}
    res = mpfit.mpfit(myfunc, start_params, functkw=fa, **kw)
    # Evaluate the model at the best-fit parameters.
    yfit = eval(func, globals(), {'x': x, 'p': res.params})
    if full_output:
        return (res, yfit)
    else:
        return (res.params, yfit)
| mit |
rationalAgent/edx-platform-custom | lms/djangoapps/foldit/models.py | 65 | 5129 | import logging
from django.contrib.auth.models import User
from django.db import models
log = logging.getLogger(__name__)
class Score(models.Model):
    """
    This model stores the scores of different users on FoldIt problems.
    """
    # The student who earned the score.
    user = models.ForeignKey(User, db_index=True,
                             related_name='foldit_scores')

    # The XModule that wants to access this doesn't have access to the real
    # userid. Save the anonymized version so we can look up by that.
    unique_user_id = models.CharField(max_length=50, db_index=True)
    puzzle_id = models.IntegerField()
    best_score = models.FloatField(db_index=True)
    current_score = models.FloatField(db_index=True)
    score_version = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)

    @staticmethod
    def display_score(score, sum_of=1):
        """
        Argument:
            score (float), as stored in the DB (i.e., "rosetta score")
            sum_of (int): if this score is the sum of scores of individual
               problems, how many elements are in that sum

        Returns:
            score (float), as displayed to the user in the game and in the leaderboard
        """
        # Rosetta scores are "lower is better": flip the sign and rescale
        # so the displayed number grows as the player improves.
        return (-score) * 10 + 8000 * sum_of

    @staticmethod
    def get_tops_n(n, puzzles=None, course_list=None):
        """
        Arguments:
            puzzles: a list of puzzle ids that we will use. If not specified,
                defaults to puzzle used in 7012x.
            n (int): number of top scores to return

        Returns:
            The top n sum of scores for puzzles in <puzzles>,
            filtered by course. If no courses is specified we default
            the pool of students to all courses. Output is a list
            of dictionaries, sorted by display_score:
                [ {username: 'a_user',
                   score: 12000} ...]
        """
        # Fix: the previous signature used a mutable default argument
        # (puzzles=['994559']) shared across calls; None preserves the
        # same observable behavior for callers that omit the argument.
        if puzzles is None:
            puzzles = ['994559']
        elif not isinstance(puzzles, list):
            puzzles = [puzzles]

        if course_list is None:
            scores = Score.objects \
                .filter(puzzle_id__in=puzzles) \
                .annotate(total_score=models.Sum('best_score')) \
                .order_by('total_score')[:n]
        else:
            scores = Score.objects \
                .filter(puzzle_id__in=puzzles) \
                .filter(user__courseenrollment__course_id__in=course_list) \
                .annotate(total_score=models.Sum('best_score')) \
                .order_by('total_score')[:n]
        num = len(puzzles)
        return [
            {'username': score.user.username,
             'score': Score.display_score(score.total_score, num)}
            for score in scores
        ]
class PuzzleComplete(models.Model):
    """
    This keeps track of the sets of puzzles completed by each user.

    e.g. PuzzleID 1234, set 1, subset 3.  (Sets and subsets correspond to levels
    in the intro puzzles)
    """
    class Meta:
        # there should only be one puzzle complete entry for any particular
        # puzzle for any user
        unique_together = ('user', 'puzzle_id', 'puzzle_set', 'puzzle_subset')
        ordering = ['puzzle_id']

    # The student who completed the puzzle.
    user = models.ForeignKey(User, db_index=True,
                             related_name='foldit_puzzles_complete')

    # The XModule that wants to access this doesn't have access to the real
    # userid. Save the anonymized version so we can look up by that.
    unique_user_id = models.CharField(max_length=50, db_index=True)
    puzzle_id = models.IntegerField()
    puzzle_set = models.IntegerField(db_index=True)
    puzzle_subset = models.IntegerField(db_index=True)
    created = models.DateTimeField(auto_now_add=True)

    # NOTE(review): __unicode__ is the Python 2 Django convention; a
    # __str__ method would be required under Python 3.
    def __unicode__(self):
        return "PuzzleComplete({0}, id={1}, set={2}, subset={3}, created={4})".format(
            self.user.username, self.puzzle_id,
            self.puzzle_set, self.puzzle_subset,
            self.created)

    @staticmethod
    def completed_puzzles(anonymous_user_id):
        """
        Return a list of puzzles that this user has completed, as an array of
        dicts:

        [ {'set': int,
           'subset': int,
           'created': datetime} ]
        """
        complete = PuzzleComplete.objects.filter(unique_user_id=anonymous_user_id)
        return [{'set': c.puzzle_set,
                 'subset': c.puzzle_subset,
                 'created': c.created} for c in complete]

    @staticmethod
    def is_level_complete(anonymous_user_id, level, sub_level, due=None):
        """
        Return True if this user completed level--sub_level by due.

        Users see levels as e.g. 4-5.

        Args:
            level: int
            sub_level: int
            due (optional): If specified, a datetime. Ignored if None.
        """
        complete = PuzzleComplete.objects.filter(unique_user_id=anonymous_user_id,
                                                 puzzle_set=level,
                                                 puzzle_subset=sub_level)
        if due is not None:
            complete = complete.filter(created__lte=due)

        return complete.exists()
| agpl-3.0 |
MartialD/hyperspy | hyperspy/drawing/tiles.py | 4 | 2899 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
class HistogramTilePlot(BlittedFigure):
    """Tiled grid of histogram bar plots, one subplot per pair.

    ``db`` is assumed to map component name -> {parameter name:
    (hist, bin_edges)} with arrays as returned by ``np.histogram``
    -- TODO confirm against callers.
    """

    def __init__(self):
        self.figure = None
        self.title = ''
        self.ax = None

    def create_axis(self, ncols=1, nrows=1, number=1, title=''):
        """Add and return one titled subplot of the grid.

        NOTE(review): matplotlib's ``add_subplot`` signature is
        (nrows, ncols, index); the first two parameter names here look
        swapped, but callers pass (rows, cols, index) positionally, so
        behavior is correct. Confirm before ever calling by keyword.
        """
        ax = self.figure.add_subplot(ncols, nrows, number)
        ax.set_title(title)
        ax.hspy_fig = self
        return ax

    def plot(self, db, **kwargs):
        """Create the figure if needed and draw all histograms in *db*."""
        if self.figure is None:
            self.create_figure()
        ncomps = len(db)
        if not ncomps:
            # Nothing to draw.
            return
        else:
            self.update(db, **kwargs)

    def update(self, db, **kwargs):
        """Redraw every histogram tile from *db*."""
        ncomps = len(db)
        # get / set axes
        i = -1
        for c_n, v in db.items():
            i += 1
            ncols = len(v)
            istart = ncols * i
            j = 0
            for p_n, (hist, bin_edges) in v.items():
                j += 1
                mask = hist > 0
                if np.any(mask):
                    title = c_n + ' ' + p_n
                    ax = self.create_axis(ncomps, ncols, istart + j, title)
                    self.ax = ax
                    # remove previous
                    while ax.patches:
                        ax.patches[0].remove()
                    # set new; only draw non-zero height bars
                    ax.bar(
                        bin_edges[
                            :-1][mask],
                        hist[mask],
                        np.diff(bin_edges)[mask],
                        # animated=True,
                        **kwargs)
                    # Pad the x range by 10% of the full bin span.
                    width = bin_edges[-1] - bin_edges[0]
                    ax.set_xlim(
                        bin_edges[0] - width * 0.1, bin_edges[-1] + width * 0.1)
                    ax.set_ylim(0, np.max(hist) * 1.1)
                    # ax.set_title(c_n + ' ' + p_n)
        self.figure.canvas.draw_idle()

    def close(self):
        """Close the backing matplotlib figure; errors are ignored."""
        try:
            plt.close(self.figure)
        except Exception:
            # Best-effort close.  Narrowed from ``except BaseException``,
            # which also swallowed KeyboardInterrupt/SystemExit.
            pass
        self.figure = None
| gpl-3.0 |
orgito/ansible | lib/ansible/modules/windows/win_shortcut.py | 52 | 4021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible metadata consumed by the doc/validation tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
# Doc fixes: "hoovering" -> "hovering"; notes line listed C(dest) twice
# and was missing a comma before C(src).
DOCUMENTATION = r'''
---
module: win_shortcut
version_added: '2.3'
short_description: Manage shortcuts on Windows
description:
- Create, manage and delete Windows shortcuts
options:
  src:
    description:
    - Executable or URL the shortcut points to.
    - The executable needs to be in your PATH, or has to be an absolute
      path to the executable.
    type: str
  description:
    description:
    - Description for the shortcut.
    - This is usually shown when hovering the icon.
    type: str
  dest:
    description:
    - Destination file for the shortcuting file.
    - File name should have a C(.lnk) or C(.url) extension.
    type: path
    required: yes
  arguments:
    description:
    - Additional arguments for the executable defined in C(src).
    - Was originally just C(args) but renamed in Ansible 2.8.
    type: str
    aliases: [ args ]
  directory:
    description:
    - Working directory for executable defined in C(src).
    type: path
  icon:
    description:
    - Icon used for the shortcut.
    - File name should have a C(.ico) extension.
    - The file name is followed by a comma and the number in the library file (.dll) or use 0 for an image file.
    type: path
  hotkey:
    description:
    - Key combination for the shortcut.
    - This is a combination of one or more modifiers and a key.
    - Possible modifiers are Alt, Ctrl, Shift, Ext.
    - Possible keys are [A-Z] and [0-9].
    type: str
  windowstyle:
    description:
    - Influences how the application is displayed when it is launched.
    type: str
    choices: [ maximized, minimized, normal ]
  state:
    description:
    - When C(absent), removes the shortcut if it exists.
    - When C(present), creates or updates the shortcut.
    type: str
    choices: [ absent, present ]
    default: present
  run_as_admin:
    description:
    - When C(src) is an executable, this can control whether the shortcut will be opened as an administrator or not.
    type: bool
    default: no
    version_added: '2.8'
notes:
- 'The following options can include Windows environment variables: C(args), C(description), C(dest), C(directory), C(icon), C(src)'
- 'Windows has two types of shortcuts: Application and URL shortcuts. URL shortcuts only consists of C(dest) and C(src)'
seealso:
- module: win_file
author:
- Dag Wieers (@dagwieers)
'''
# Doc fix: the second example's icon path was missing the closing '%'
# of the %ProgramFiles% environment variable.
EXAMPLES = r'''
- name: Create an application shortcut on the desktop
  win_shortcut:
    src: C:\Program Files\Mozilla Firefox\Firefox.exe
    dest: C:\Users\Public\Desktop\Mozilla Firefox.lnk
    icon: C:\Program Files\Mozilla Firefox\Firefox.exe,0

- name: Create the same shortcut using environment variables
  win_shortcut:
    description: The Mozilla Firefox web browser
    src: '%ProgramFiles%\Mozilla Firefox\Firefox.exe'
    dest: '%Public%\Desktop\Mozilla Firefox.lnk'
    icon: '%ProgramFiles%\Mozilla Firefox\Firefox.exe,0'
    directory: '%ProgramFiles%\Mozilla Firefox'
    hotkey: Ctrl+Alt+F

- name: Create an application shortcut for an executable in PATH to your desktop
  win_shortcut:
    src: cmd.exe
    dest: Desktop\Command prompt.lnk

- name: Create an application shortcut for the Ansible website
  win_shortcut:
    src: '%ProgramFiles%\Google\Chrome\Application\chrome.exe'
    dest: '%UserProfile%\Desktop\Ansible website.lnk'
    arguments: --new-window https://ansible.com/
    directory: '%ProgramFiles%\Google\Chrome\Application'
    icon: '%ProgramFiles%\Google\Chrome\Application\chrome.exe,0'
    hotkey: Ctrl+Alt+A

- name: Create a URL shortcut for the Ansible website
  win_shortcut:
    src: https://ansible.com/
    dest: '%Public%\Desktop\Ansible website.url'
'''
# This module returns only the standard Ansible result keys.
RETURN = r'''
'''
| gpl-3.0 |
zhukaixy/kbengine | kbe/src/lib/python/Tools/freeze/makefreeze.py | 37 | 2706 | import marshal
import bkfile
# Write a file containing frozen code for the modules in the dictionary.
header = """
#include "Python.h"
static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
{0, 0, 0} /* sentinel */
};
"""
# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
default_entry_point = """
int
main(int argc, char **argv)
{
extern int Py_FrozenMain(int, char **);
""" + ((not __debug__ and """
Py_OptimizeFlag++;
""") or "") + """
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(argc, argv);
}
"""
def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
    """Write frozen-module C sources for every module in *dict*.

    Arguments:
        base: directory prefix for the generated .c files.
        dict: mapping of dotted module name -> module object (must have
            __code__ and __path__ attributes).  The parameter name
            shadows the builtin but is kept for backward compatibility.
        debug: when true, print progress messages.
        entry_point: C source for main(); defaults to a standard
            Py_FrozenMain wrapper.
        fail_import: module names frozen with a NULL code pointer so
            importing them always raises ImportError.

    Returns the list of generated file names (relative to *base*).
    """
    if entry_point is None: entry_point = default_entry_point
    done = []
    files = []
    mods = sorted(dict.keys())
    for mod in mods:
        m = dict[mod]
        mangled = "__".join(mod.split("."))
        if m.__code__:
            # Renamed local from "file" for clarity.
            filename = 'M_' + mangled + '.c'
            outfp = bkfile.open(base + filename, 'w')
            files.append(filename)
            if debug:
                print("freezing", mod, "...")
            # Renamed local from "str", which shadowed the builtin.
            code_bytes = marshal.dumps(m.__code__)
            size = len(code_bytes)
            if m.__path__:
                # Indicate package by negative size
                size = -size
            done.append((mod, mangled, size))
            writecode(outfp, mangled, code_bytes)
            outfp.close()
    if debug:
        print("generating table of frozen modules")
    outfp = bkfile.open(base + 'frozen.c', 'w')
    for mod, mangled, size in done:
        outfp.write('extern unsigned char M_%s[];\n' % mangled)
    outfp.write(header)
    for mod, mangled, size in done:
        outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
    outfp.write('\n')
    # The following modules have a NULL code pointer, indicating
    # that the frozen program should not search for them on the host
    # system. Importing them will *always* raise an ImportError.
    # The zero value size is never used.
    for mod in fail_import:
        outfp.write('\t{"%s", NULL, 0},\n' % (mod,))
    outfp.write(trailer)
    outfp.write(entry_point)
    outfp.close()
    return files
# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.
def writecode(outfp, mod, str):
    """Write a C initializer ``unsigned char M_<mod>[]`` holding *str* bytes,
    16 byte values per line."""
    outfp.write('unsigned char M_%s[] = {' % mod)
    for offset in range(0, len(str), 16):
        chunk = bytes(str[offset:offset + 16])
        outfp.write('\n\t' + ''.join('%d,' % byte for byte in chunk))
    outfp.write('\n};\n')
## def writecode(outfp, mod, str):
## outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
## '\\"'.join(map(lambda s: repr(s)[1:-1], str.split('"')))))
| lgpl-3.0 |
psdh/servo | tests/wpt/harness/wptrunner/executors/executorservodriver.py | 21 | 8682 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import socket
import threading
import time
import traceback
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
strip_server)
import webdriver
from ..testrunner import Stop
# Directory containing this file; used to locate the bundled JS helpers.
# (os.path.join with a single argument is a no-op wrapper around dirname.)
here = os.path.join(os.path.split(__file__)[0])

# Extra slack (seconds) granted on top of each test's own timeout.
extra_timeout = 5
class ServoWebDriverProtocol(Protocol):
    """Browser protocol that talks to Servo over a WebDriver session."""

    def __init__(self, executor, browser, capabilities, **kwargs):
        Protocol.__init__(self, executor, browser)
        self.capabilities = capabilities
        self.host = browser.webdriver_host
        self.port = browser.webdriver_port
        self.session = None

    def setup(self, runner):
        """Connect to browser via WebDriver."""
        self.runner = runner

        session_started = False
        try:
            self.session = webdriver.Session(self.host, self.port,
                                             extension=webdriver.ServoExtensions)
            self.session.start()
        except Exception:
            # Narrowed from a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit.
            self.logger.warning(
                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
        else:
            self.logger.debug("session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect via WebDriver")
            self.executor.runner.send_message("init_failed")
        else:
            self.executor.runner.send_message("init_succeeded")

    def teardown(self):
        """Close the WebDriver session, ignoring errors (it may be gone)."""
        self.logger.debug("Hanging up on WebDriver session")
        try:
            self.session.end()
        except Exception:
            # Narrowed from a bare "except:"; best-effort teardown only.
            pass

    def is_alive(self):
        try:
            # Get a simple property over the connection
            self.session.handle
        # TODO what exception?
        except Exception:
            return False
        return True

    def after_connect(self):
        pass

    def wait(self):
        """Block until the WebDriver connection drops or errors."""
        while True:
            try:
                self.session.execute_async_script("")
            except webdriver.TimeoutException:
                pass
            except (socket.timeout, IOError):
                break
            except Exception:
                # Bug fix: traceback.format_exc() takes a traceback
                # *limit*, not an exception object; format_exc(e) was wrong.
                self.logger.error(traceback.format_exc())
                break
class ServoWebDriverRun(object):
    """Run *func* against a WebDriver session on a worker thread,
    enforcing a hard timeout from the calling thread."""

    def __init__(self, func, session, url, timeout, current_timeout=None):
        self.func = func
        self.result = None
        self.session = session
        self.url = url
        self.timeout = timeout
        # Set by the worker once self.result is populated.
        self.result_flag = threading.Event()

    def run(self):
        """Start the worker and return a (success, data) tuple."""
        executor = threading.Thread(target=self._run)
        executor.start()

        flag = self.result_flag.wait(self.timeout + extra_timeout)
        if self.result is None:
            # The worker did not finish within timeout + slack.
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.session, self.url, self.timeout)
        except webdriver.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, IOError):
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # Bug fix: format_exc() takes a traceback *limit*, not an
            # exception object; format_exc(e) was wrong.
            message += traceback.format_exc()
            self.result = False, ("ERROR", e)
        finally:
            self.result_flag.set()
def timeout_func(timeout):
    """Return a predicate reporting whether *timeout* (+ slack) has elapsed.

    A falsy *timeout* yields a predicate that is always False.
    """
    if not timeout:
        return lambda: False

    start = time.time()
    return lambda: time.time() - start > timeout + extra_timeout
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
    """testharness.js executor for Servo driven over WebDriver."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None):
        # Bug fix: forward timeout_multiplier and debug_info to the base
        # class.  They were previously hard-coded to 1 and None, silently
        # discarding the caller's settings (the sibling
        # ServoWebDriverRefTestExecutor forwards them correctly).
        TestharnessExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
        with open(os.path.join(here, "testharness_servodriver.js")) as f:
            self.script = f.read()
        self.timeout = None

    def on_protocol_change(self, new_protocol):
        pass

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        url = self.test_url(test)

        # Grant the harness some slack beyond the test's own timeout.
        timeout = test.timeout * self.timeout_multiplier + extra_timeout

        if timeout != self.timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        success, data = ServoWebDriverRun(self.do_testharness,
                                          self.protocol.session,
                                          url,
                                          timeout).run()

        if success:
            return self.convert_result(test, data)

        return (test.result_cls(*data), [])

    def do_testharness(self, session, url, timeout):
        session.url = url
        result = json.loads(
            session.execute_async_script(
                self.script % {"abs_url": url,
                               "url": strip_server(url),
                               "timeout_multiplier": self.timeout_multiplier,
                               "timeout": timeout * 1000}))
        # Prevent leaking every page in history until Servo develops a more sane
        # page cache
        session.back()
        return result
# NOTE: shadows the Python 3 builtin TimeoutError within this module.
class TimeoutError(Exception):
    """Raised when a reftest run exceeds its allotted time."""
class ServoWebDriverRefTestExecutor(RefTestExecutor):
    """Reftest executor for Servo driven over WebDriver."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, capabilities=None, debug_info=None):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = ServoWebDriverProtocol(self, browser,
                                               capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        self.timeout = None
        with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        try:
            result = self.implementation.run_test(test)
            return self.convert_result(test, result)
        except IOError:
            return test.result_cls("CRASH", None), []
        except TimeoutError:
            return test.result_cls("TIMEOUT", None), []
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            # Bug fix: format_exc() takes a traceback *limit*, not an
            # exception object; format_exc(e) was wrong.
            message += traceback.format_exc()
            return test.result_cls("ERROR", message), []

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
                   if self.debug_info is None else None)

        if self.timeout != timeout:
            try:
                self.protocol.session.timeouts.script = timeout
                self.timeout = timeout
            except IOError:
                self.logger.error("Lost webdriver connection")
                return Stop

        return ServoWebDriverRun(self._screenshot,
                                 self.protocol.session,
                                 self.test_url(test),
                                 timeout).run()

    def _screenshot(self, session, url, timeout):
        session.url = url
        session.execute_async_script(self.wait_script)
        return session.screenshot()
| mpl-2.0 |
loli/medpy | doc/numpydoc/numpydoc/comment_eater.py | 1 | 5357 |
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from io import StringIO
import compiler
import inspect
import textwrap
import tokenize
from .compiler_unparse import unparse
class Comment(object):
    """A contiguous block of comment lines."""

    is_comment = True

    def __init__(self, start_lineno, end_lineno, text):
        # First line number of the block, 1-indexed.
        self.start_lineno = start_lineno
        # Last line number, inclusive.
        self.end_lineno = end_lineno
        # Comment text including '#' markers but no leading spaces.
        self.text = text

    def add(self, string, start, end, line):
        """Extend the block with one more comment line."""
        first, last = start[0], end[0]
        if first < self.start_lineno:
            self.start_lineno = first
        if last > self.end_lineno:
            self.end_lineno = last
        self.text += string

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
                                   self.end_lineno, self.text)
class NonComment(object):
    """A block of code lines that is not a comment."""

    is_comment = False

    def __init__(self, start_lineno, end_lineno):
        self.start_lineno = start_lineno
        self.end_lineno = end_lineno

    def add(self, string, start, end, line):
        """Grow the block; tokens that are pure whitespace are ignored."""
        if string.strip():
            first, last = start[0], end[0]
            if first < self.start_lineno:
                self.start_lineno = first
            if last > self.end_lineno:
                self.end_lineno = last

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
                               self.end_lineno)
class CommentBlocker(object):
    """ Pull out contiguous comment blocks.
    """

    def __init__(self):
        # Start with a dummy.
        self.current_block = NonComment(0, 0)

        # All of the blocks seen so far.
        self.blocks = []

        # The index mapping lines of code to their associated comment blocks.
        self.index = {}

    def process_file(self, file):
        """ Process a file object.
        """
        # NOTE(review): both branches bind the same attribute; the Python 2
        # branch was presumably meant to be ``file.next`` (looks like a 2to3
        # conversion artifact) -- confirm if Python 2 support is required.
        if sys.version_info[0] >= 3:
            nxt = file.__next__
        else:
            nxt = file.__next__
        for token in tokenize.generate_tokens(nxt):
            self.process_token(*token)
        self.make_index()

    def process_token(self, kind, string, start, end, line):
        """ Process a single token.
        """
        # Start a new block whenever the token kind crosses the
        # comment / non-comment boundary; otherwise extend the current one.
        if self.current_block.is_comment:
            if kind == tokenize.COMMENT:
                self.current_block.add(string, start, end, line)
            else:
                self.new_noncomment(start[0], end[0])
        else:
            if kind == tokenize.COMMENT:
                self.new_comment(string, start, end, line)
            else:
                self.current_block.add(string, start, end, line)

    def new_noncomment(self, start_lineno, end_lineno):
        """ We are transitioning from a noncomment to a comment.
        """
        block = NonComment(start_lineno, end_lineno)
        self.blocks.append(block)
        self.current_block = block

    def new_comment(self, string, start, end, line):
        """ Possibly add a new comment.

        Only adds a new comment if this comment is the only thing on the line.
        Otherwise, it extends the noncomment block.
        """
        # Anything before the comment marker on the same line means this is
        # a trailing comment, which belongs to the surrounding code block.
        prefix = line[:start[1]]
        if prefix.strip():
            # Oops! Trailing comment, not a comment block.
            self.current_block.add(string, start, end, line)
        else:
            # A comment block.
            block = Comment(start[0], end[0], string)
            self.blocks.append(block)
            self.current_block = block

    def make_index(self):
        """ Make the index mapping lines of actual code to their associated
        prefix comments.
        """
        # Pair each non-comment block with the block immediately before it.
        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
            if not block.is_comment:
                self.index[block.start_lineno] = prev

    def search_for_comment(self, lineno, default=None):
        """ Find the comment block just before the given line number.

        Returns None (or the specified default) if there is no such block.
        """
        if not self.index:
            self.make_index()
        block = self.index.get(lineno, None)
        # A preceding NonComment block has no .text, so this falls back
        # to *default* unless the predecessor really was a comment.
        text = getattr(block, 'text', default)
        return text
def strip_comment_marker(text):
    """ Strip # markers at the front of a block of comment text.
    """
    stripped = [line.lstrip('#') for line in text.splitlines()]
    return textwrap.dedent('\n'.join(stripped))
def get_class_traits(klass):
    """ Yield all of the documentation for trait definitions on a class object.

    Yields (name, rhs_source, doc) triples for each top-level assignment
    in the class body.

    NOTE(review): relies on the ``compiler`` module, which exists only on
    Python 2 (removed in Python 3); this function cannot run on Python 3
    as written.
    """
    # FIXME: gracefully handle errors here or in the caller?
    source = inspect.getsource(klass)
    cb = CommentBlocker()
    cb.process_file(StringIO(source))
    mod_ast = compiler.parse(source)
    class_ast = mod_ast.node.nodes[0]
    for node in class_ast.code.nodes:
        # FIXME: handle other kinds of assignments?
        if isinstance(node, compiler.ast.Assign):
            name = node.nodes[0].name
            rhs = unparse(node.expr).strip()
            # Attach the comment block immediately preceding the assignment.
            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
            yield name, rhs, doc
| gpl-3.0 |
jeremiahyan/odoo | addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | 1 | 15651 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.addons.iap.tools import iap_tools
from odoo.exceptions import UserError
_logger = logging.getLogger(__name__)

# Base URL of the Odoo IAP lead-mining service.
DEFAULT_ENDPOINT = 'https://iap-services.odoo.com'
# Hard caps enforced client-side on a single mining request.
MAX_LEAD = 200
MAX_CONTACT = 5

# IAP credit cost per discovered company / per extracted contact.
CREDIT_PER_COMPANY = 1
CREDIT_PER_CONTACT = 1
class CRMLeadMiningRequest(models.Model):
_name = 'crm.iap.lead.mining.request'
_description = 'CRM Lead Mining Request'
def _default_lead_type(self):
if self.env.user.has_group('crm.group_use_lead'):
return 'lead'
else:
return 'opportunity'
    def _default_country_ids(self):
        # Pre-select the current user's company country as search filter.
        return self.env.user.company_id.country_id
name = fields.Char(string='Request Number', required=True, readonly=True, default=lambda self: _('New'), copy=False)
state = fields.Selection([('draft', 'Draft'), ('error', 'Error'), ('done', 'Done')], string='Status', required=True, default='draft')
# Request Data
lead_number = fields.Integer(string='Number of Leads', required=True, default=3)
search_type = fields.Selection([('companies', 'Companies'), ('people', 'Companies and their Contacts')], string='Target', required=True, default='companies')
error_type = fields.Selection([
('credits', 'Insufficient Credits'),
('no_result', 'No Result'),
], string='Error Type', readonly=True)
# Lead / Opportunity Data
lead_type = fields.Selection([('lead', 'Leads'), ('opportunity', 'Opportunities')], string='Type', required=True, default=_default_lead_type)
display_lead_label = fields.Char(compute='_compute_display_lead_label')
team_id = fields.Many2one(
'crm.team', string='Sales Team', ondelete="set null",
domain="[('use_opportunities', '=', True)]", readonly=False, compute='_compute_team_id', store=True)
user_id = fields.Many2one('res.users', string='Salesperson', default=lambda self: self.env.user)
tag_ids = fields.Many2many('crm.tag', string='Tags')
lead_ids = fields.One2many('crm.lead', 'lead_mining_request_id', string='Generated Lead / Opportunity')
lead_count = fields.Integer(compute='_compute_lead_count', string='Number of Generated Leads')
# Company Criteria Filter
filter_on_size = fields.Boolean(string='Filter on Size', default=False)
company_size_min = fields.Integer(string='Size', default=1)
company_size_max = fields.Integer(default=1000)
country_ids = fields.Many2many('res.country', string='Countries', default=_default_country_ids)
state_ids = fields.Many2many('res.country.state', string='States')
available_state_ids = fields.One2many('res.country.state', compute='_compute_available_state_ids',
help="List of available states based on selected countries")
industry_ids = fields.Many2many('crm.iap.lead.industry', string='Industries')
# Contact Generation Filter
contact_number = fields.Integer(string='Number of Contacts', default=10)
contact_filter_type = fields.Selection([('role', 'Role'), ('seniority', 'Seniority')], string='Filter on', default='role')
preferred_role_id = fields.Many2one('crm.iap.lead.role', string='Preferred Role')
role_ids = fields.Many2many('crm.iap.lead.role', string='Other Roles')
seniority_id = fields.Many2one('crm.iap.lead.seniority', string='Seniority')
# Fields for the blue tooltip
lead_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_contacts_credits = fields.Char(compute='_compute_tooltip', readonly=True)
lead_total_credits = fields.Char(compute='_compute_tooltip', readonly=True)
@api.depends('lead_type', 'lead_number')
def _compute_display_lead_label(self):
selection_description_values = {
e[0]: e[1] for e in self._fields['lead_type']._description_selection(self.env)}
for request in self:
lead_type = selection_description_values[request.lead_type]
request.display_lead_label = '%s %s' % (request.lead_number, lead_type)
    @api.onchange('lead_number', 'contact_number')
    def _compute_tooltip(self):
        # NOTE(review): decorated with @api.onchange but named and wired as
        # the compute of the *_credits fields -- confirm the intended
        # trigger before refactoring.
        for record in self:
            # Credits needed to discover the requested companies.
            company_credits = CREDIT_PER_COMPANY * record.lead_number
            # Credits needed per company to extract the requested contacts.
            contact_credits = CREDIT_PER_CONTACT * record.contact_number
            total_contact_credits = contact_credits * record.lead_number
            record.lead_contacts_credits = _("Up to %d additional credits will be consumed to identify %d contacts per company.") % (contact_credits*company_credits, record.contact_number)
            record.lead_credits = _('%d credits will be consumed to find %d companies.') % (company_credits, record.lead_number)
            record.lead_total_credits = _("This makes a total of %d credits for this request.") % (total_contact_credits + company_credits)
@api.depends('lead_ids.lead_mining_request_id')
def _compute_lead_count(self):
if self.ids:
leads_data = self.env['crm.lead'].read_group(
[('lead_mining_request_id', 'in', self.ids)],
['lead_mining_request_id'], ['lead_mining_request_id'])
else:
leads_data = []
mapped_data = dict(
(m['lead_mining_request_id'][0], m['lead_mining_request_id_count'])
for m in leads_data)
for request in self:
request.lead_count = mapped_data.get(request.id, 0)
    @api.depends('user_id', 'lead_type')
    def _compute_team_id(self):
        """ When changing the user, also set a team_id or restrict team id
        to the ones user_id is member of. """
        for mining in self:
            # setting user as void should not trigger a new team computation
            if not mining.user_id:
                continue
            user = mining.user_id
            # Keep the current team if the new user already belongs to it
            # (as member or as team leader).
            if mining.team_id and user in mining.team_id.member_ids | mining.team_id.user_id:
                continue
            # Otherwise pick the user's default team matching the request type.
            team_domain = [('use_leads', '=', True)] if mining.lead_type == 'lead' else [('use_opportunities', '=', True)]
            team = self.env['crm.team']._get_default_team_id(user_id=user.id, domain=team_domain)
            mining.team_id = team.id
    @api.depends('country_ids')
    def _compute_available_state_ids(self):
        """ States for some specific countries should not be offered as filtering options because
        they drastically reduce the amount of IAP reveal results.

        For example, in Belgium, only 11% of companies have a defined state within the
        reveal service while the rest of them have no state defined at all.

        Meaning specifying states for that country will yield a lot less results than what you could
        expect, which is not the desired behavior.
        Obviously all companies are active within a state, it's just a lack of data in the reveal
        service side.

        To help users create meaningful iap searches, we only keep the states filtering for several
        whitelisted countries (based on their country code).
        The complete list and reasons for this change can be found on task-2471703. """
        for lead_mining_request in self:
            # Only countries whose ISO code is whitelisted expose their states.
            countries = lead_mining_request.country_ids.filtered(lambda country:
                country.code in iap_tools._STATES_FILTER_COUNTRIES_WHITELIST)
            lead_mining_request.available_state_ids = self.env['res.country.state'].search([
                ('country_id', 'in', countries.ids)
            ])
    @api.onchange('available_state_ids')
    def _onchange_available_state_ids(self):
        """Drop selected states that are no longer offered after the set of
        available states changed (e.g. when countries were modified)."""
        # _origin.id is used because onchange works on virtual records
        self.state_ids -= self.state_ids.filtered(
            lambda state: (state._origin.id or state.id) not in self.available_state_ids.ids
        )
@api.onchange('lead_number')
def _onchange_lead_number(self):
if self.lead_number <= 0:
self.lead_number = 1
elif self.lead_number > MAX_LEAD:
self.lead_number = MAX_LEAD
@api.onchange('contact_number')
def _onchange_contact_number(self):
if self.contact_number <= 0:
self.contact_number = 1
elif self.contact_number > MAX_CONTACT:
self.contact_number = MAX_CONTACT
    @api.onchange('country_ids')
    def _onchange_country_ids(self):
        """Clear the state filter whenever the country selection changes."""
        self.state_ids = []
@api.onchange('company_size_min')
def _onchange_company_size_min(self):
if self.company_size_min <= 0:
self.company_size_min = 1
elif self.company_size_min > self.company_size_max:
self.company_size_min = self.company_size_max
    @api.onchange('company_size_max')
    def _onchange_company_size_max(self):
        """Never let the maximum company size drop below the minimum."""
        if self.company_size_max < self.company_size_min:
            self.company_size_max = self.company_size_min
    def _prepare_iap_payload(self):
        """
        This will prepare the data to send to the server

        :return: dict with the search criteria (lead_number, search_type,
            countries and, when set, states / company size / industries /
            contact filters) consumed by the IAP reveal endpoint.
        """
        self.ensure_one()
        payload = {'lead_number': self.lead_number,
                   'search_type': self.search_type,
                   'countries': self.country_ids.mapped('code')}
        if self.state_ids:
            payload['states'] = self.state_ids.mapped('code')
        if self.filter_on_size:
            payload.update({'company_size_min': self.company_size_min,
                            'company_size_max': self.company_size_max})
        if self.industry_ids:
            # accumulate all reveal_ids (separated by ',') into one list
            # eg: 3 records with values: "175,176", "177" and "190,191"
            # will become ['175','176','177','190','191']
            all_industry_ids = [
                reveal_id.strip()
                for reveal_ids in self.mapped('industry_ids.reveal_ids')
                for reveal_id in reveal_ids.split(',')
            ]
            payload['industry_ids'] = all_industry_ids
        if self.search_type == 'people':
            # contact criteria only apply to "people" searches
            payload.update({'contact_number': self.contact_number,
                            'contact_filter_type': self.contact_filter_type})
            if self.contact_filter_type == 'role':
                payload.update({'preferred_role': self.preferred_role_id.reveal_id,
                                'other_roles': self.role_ids.mapped('reveal_id')})
            elif self.contact_filter_type == 'seniority':
                payload['seniority'] = self.seniority_id.reveal_id
        return payload
    def _perform_request(self):
        """
        This will perform the request and create the corresponding leads.
        The user will be notified if he hasn't enough credits.

        Returns the 'data' part of the IAP response, or False on a handled
        error (error_type is then set to 'no_result' or 'credits').
        Any other failure is surfaced to the user as a UserError.
        """
        self.error_type = False
        server_payload = self._prepare_iap_payload()
        reveal_account = self.env['iap.account'].get('reveal')
        dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
        endpoint = self.env['ir.config_parameter'].sudo().get_param('reveal.endpoint', DEFAULT_ENDPOINT) + '/iap/clearbit/1/lead_mining_request'
        params = {
            'account_token': reveal_account.account_token,
            'dbuuid': dbuuid,
            'data': server_payload
        }
        try:
            # long timeout: the reveal service can take a while for big requests
            response = iap_tools.iap_jsonrpc(endpoint, params=params, timeout=300)
            if not response.get('data'):
                self.error_type = 'no_result'
                return False
            return response['data']
        except iap_tools.InsufficientCreditError as e:
            # not enough IAP credits: flag the record, caller shows the message
            self.error_type = 'credits'
            self.state = 'error'
            return False
        except Exception as e:
            raise UserError(_("Your request could not be executed: %s", e))
    def _create_leads_from_response(self, result):
        """ This method will get the response from the service and create the leads accordingly

        :param result: list of dicts returned by the IAP service; each entry
            carries 'company_data' (with a 'clearbit_id') and optionally
            'people_data'.
        """
        self.ensure_one()
        lead_vals_list = []
        # clearbit_id -> values for the enrichment note posted on the lead
        messages_to_post = {}
        for data in result:
            lead_vals_list.append(self._lead_vals_from_response(data))
            template_values = data['company_data']
            template_values.update({
                'flavor_text': _("Opportunity created by Odoo Lead Generation"),
                'people_data': data.get('people_data'),
            })
            messages_to_post[data['company_data']['clearbit_id']] = template_values
        # batch-create all leads in one call
        leads = self.env['crm.lead'].create(lead_vals_list)
        for lead in leads:
            # match each created lead back to its payload via reveal_id
            if messages_to_post.get(lead.reveal_id):
                lead.message_post_with_view('iap_mail.enrich_company', values=messages_to_post[lead.reveal_id], subtype_id=self.env.ref('mail.mt_note').id)
    # Methods responsible for format response data into valid odoo lead data
    @api.model
    def _lead_vals_from_response(self, data):
        """Convert one IAP response entry into crm.lead create() values,
        tagged with this mining request's id.

        NOTE(review): decorated @api.model yet calls ensure_one() and reads
        record fields -- it is effectively an instance method; confirm the
        decorator is intentional.
        """
        self.ensure_one()
        company_data = data.get('company_data')
        people_data = data.get('people_data')
        lead_vals = self.env['crm.iap.lead.helpers'].lead_vals_from_response(self.lead_type, self.team_id.id, self.tag_ids.ids, self.user_id.id, company_data, people_data)
        lead_vals['lead_mining_request_id'] = self.id
        return lead_vals
    @api.model
    def get_empty_list_help(self, help):
        """Return the HTML shown on the empty list view of mining requests."""
        help_title = _('Create a Lead Mining Request')
        sub_title = _('Generate new leads based on their country, industry, size, etc.')
        return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)
    def action_draft(self):
        """Reset the request to draft, restoring the 'New' placeholder name."""
        self.ensure_one()
        self.name = _('New')
        self.state = 'draft'
    def action_submit(self):
        """Submit the mining request: assign a sequence name, call the IAP
        service, create leads on success and return the action that shows
        them; on failure return an action/False that redisplays the form
        with the error message."""
        self.ensure_one()
        if self.name == _('New'):
            # consume a sequence number on first submission only
            self.name = self.env['ir.sequence'].next_by_code('crm.iap.lead.mining.request') or _('New')
        results = self._perform_request()
        if results:
            self._create_leads_from_response(results)
            self.state = 'done'
            if self.lead_type == 'lead':
                return self.action_get_lead_action()
            elif self.lead_type == 'opportunity':
                return self.action_get_opportunity_action()
        elif self.env.context.get('is_modal'):
            # when we are inside a modal already, we re-open the same record
            # that way, the form view is updated and the correct error message appears
            # (sadly, there is no way to simply 'reload' a form view within a modal)
            return {
                'name': _('Generate Leads'),
                'res_model': 'crm.iap.lead.mining.request',
                'views': [[False, 'form']],
                'target': 'new',
                'type': 'ir.actions.act_window',
                'res_id': self.id,
                'context': dict(self.env.context, edit=True, form_view_initial_mode='edit')
            }
        else:
            # will reload the form view and show the error message on top
            return False
    def action_get_lead_action(self):
        """Return the standard leads action restricted to this request's leads."""
        self.ensure_one()
        action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_all_leads")
        action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'lead')]
        return action
    def action_get_opportunity_action(self):
        """Return the opportunities action restricted to this request's leads."""
        self.ensure_one()
        action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_opportunities")
        action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'opportunity')]
        return action
    def action_buy_credits(self):
        """Open the IAP portal URL where the user can buy reveal credits."""
        return {
            'type': 'ir.actions.act_url',
            'url': self.env['iap.account'].get_credits_url(service_name='reveal'),
        }
| gpl-3.0 |
ogajduse/spacewalk | client/debian/packages-already-in-debian/rhn-client-tools/test/testutils.py | 4 | 9439 | #!/usr/bin/python
import os,sys
import getopt
import shutil
import glob
# Scratch locations used by the up2date test harness.
DBPATH="/tmp/testdb"                  # throwaway rpm database
DBCACHEPATH="/tmp/testdbcaches/"      # cache of rebuilt rpm databases
UP2DATE_COMMAND="up2date --justdb --dbpath $DBPATH"
DATAPATH="/tmp/datadir"               # per-test result files
TOPDIR="../"
DBDIR="%s/testdbs" % "/usr/src/rhn/test/up2date/depsolve/"
PKGDIR="%s/testpackages" % "/usr/src/rhn/test/up2date/rollbacks/"
RESULTSPATH="%s/results/" % (TOPDIR)  # expected-results listings
CONFIGPATH="%s/configs" % "/usr/src/rhn/test/up2date/depsolve/"
REPACKAGEDIR="/tmp/testrepackage"
PLATFORMPATH="/etc/rpm/platform"
def createDbDir():
    # Recreate DBPATH from scratch for a clean test rpm database.
    # remove the old if its there
    try:
        shutil.rmtree(DBPATH)
    except OSError:
        # rmtree failed (typically: directory absent); proceed to create it
        #whatever...
        print "%s doesnt exist, creating it" % DBPATH
        pass
    # make the new
    if not os.access(DBPATH, os.W_OK):
        os.makedirs(DBPATH)
def createRepackageDir():
    # Recreate REPACKAGEDIR from scratch for rollback/repackage tests.
    # remove the old if its there
    try:
        shutil.rmtree(REPACKAGEDIR)
    except OSError:
        # rmtree failed (typically: directory absent); proceed to create it
        #whatever...
        print "%s doesnt exist, creating it" % REPACKAGEDIR
        pass
    # make the new
    if not os.access(REPACKAGEDIR, os.W_OK):
        os.makedirs(REPACKAGEDIR)
def createDataDirs():
    # Create every scratch directory the harness needs: db, repackage, data.
    createDbDir()
    createRepackageDir()
    if not os.access(DATAPATH, os.W_OK):
        os.makedirs(DATAPATH)
def rebuildRepackageDir(repackageName):
    # Reset REPACKAGEDIR and populate it with the named test package set.
    createRepackageDir()
    for rpmfile in glob.glob("%s/%s/*.rpm" % (PKGDIR, repackageName)):
        shutil.copy(rpmfile, REPACKAGEDIR)
# fetch a copy of the rebuild db from the cache if we have it
def lookForDbCache(dbname):
    # Returns 0 on a cache hit (files copied into DBPATH), 1 on a miss.
    if not os.access("%s/%s" % (DBCACHEPATH, dbname), os.R_OK):
        return 1
    #print "Using db cache"
    files = glob.glob("%s/%s/*" % (DBCACHEPATH, dbname))
    for file in files:
        shutil.copy(file, DBPATH)
    return 0
def populateDbCache(dbname):
    # Store the freshly rebuilt rpm database under DBCACHEPATH/<dbname>
    # so later runs can skip the expensive --rebuilddb step.
    cachedir = "%s/%s" % (DBCACHEPATH, dbname)
    os.makedirs(cachedir)
    for dbfile in glob.glob("%s/*" % DBPATH):
        shutil.copy(dbfile, cachedir)
def rebuildRpmDatabase(dbname):
    # Build the test rpm database in DBPATH from the stored Packages file,
    # using (and populating) the rebuild cache to avoid repeated --rebuilddb.
    createDbDir()
    if lookForDbCache(dbname):
        # cache miss: rebuild from the pristine Packages file
        print "Rebuilding rpm database"
        shutil.copy("%s/%s/Packages" % (DBDIR,dbname) , DBPATH)
        cmdline = "rpm -v --dbpath %s --rebuilddb" % DBPATH
        fd = os.popen(cmdline)
        fd.read()
        fd.close()
        populateDbCache(dbname)
def buildUp2dateCommand(options):
    # Compose the up2date command line pointed at the scratch database.
    return "up2date --justdb --dbpath %s %s" % (DBPATH, options)
def getRpmQAList():
    # Return the list of installed package names in the scratch database
    # (output of rpm -qa, one stripped name per entry).
    cmdline = "rpm --dbpath %s -qa" % DBPATH
    fd = os.popen(cmdline)
    tmp = fd.readlines()
    # Python 2: map() returns a list here
    out = map(lambda a:a.strip(), tmp)
    fd.close()
    return out
def runUp2date(cmd):
    # Run the given command and capture everything it prints on stdout.
    pipe = os.popen(cmd)
    output = pipe.read()
    pipe.close()
    return output
def storeResults(results, testname, type):
    # Persist a package list for a test run under DATAPATH.
    #   type "pre"   -> <testname>.pre   (state before up2date)
    #   type "after" -> <testname>.post  (state after up2date)
    # The original silently ignored unknown types; raise instead so a
    # typo in the caller cannot drop results on the floor.
    suffixes = {"pre": ".pre", "after": ".post"}
    if type not in suffixes:
        raise ValueError("unknown results type: %s" % type)
    fd = open("%s/%s%s" % (DATAPATH, testname, suffixes[type]), "w")
    fd.write("\n".join(results))
    fd.close()
def saveUp2dateOut(up2dateOut, testname):
    # Archive the raw up2date output for the test under DATAPATH.
    out = open("%s/%s.up2date-out" % (DATAPATH, testname), "w")
    out.write(up2dateOut)
    out.close()
# Configuration files swapped in/out by setupConfig()/restoreConfig().
FILENAMES = ['/etc/sysconfig/rhn/up2date',
             '/etc/sysconfig/rhn/systemid',
             '/etc/sysconfig/rhn/sources',
             '/etc/sysconfig/rhn/network']
def setupConfig(configname):
    # copy over the approriate up2date and systemid
    # to /etc/sysconfig/rhn
    path = "%s/%s" % (CONFIGPATH, configname)
    for filename in FILENAMES:
        # store a backup of the original if it doesnt exist yet
        # Bug fix: the guard used to test "%s..orig-test" (double dot)
        # while the backup is written as "%s.orig-test", so the pristine
        # backup was overwritten on every call and restoreConfig() could
        # only restore the previous test's config.
        if os.access(filename, os.R_OK) and \
           not os.access("%s.orig-test" % filename, os.R_OK):
            shutil.copy(filename, "%s.orig-test" % filename)
        stored = "%s/%s" % (path, os.path.basename(filename))
        if os.access(stored, os.R_OK):
            shutil.copy(stored, filename)
def restoreConfig():
    # Put back the .orig-test backups created by setupConfig().
    for filename in FILENAMES:
        backup = "%s.orig-test" % filename
        if os.access(backup, os.R_OK):
            shutil.copy(backup, filename)
def logFailures(name):
    # Record a failed testcase name in DATAPATH/FAILURES.
    # Bug fix: mode "w+" truncated the file on every call, so only the
    # most recent failure survived a run; append instead so the file
    # accumulates every failure.
    fd = open("%s/FAILURES" % DATAPATH, "a")
    fd.write("%s\n" % name)
    fd.close()
def runTestcase(testcase):
    # Execute one Testcase end to end: rebuild the rpm db, snapshot the
    # package list, run up2date, snapshot again, and diff the outcome
    # against the expected results listing.
    print "Generating an rpm db in %s based on %s" % (DBPATH, "%s/%s" % (DBDIR,testcase.dbname))
    rebuildRpmDatabase(testcase.dbname)
    cmd = buildUp2dateCommand(testcase.options)
    beforeList = getRpmQAList()
    storeResults(beforeList, testcase.name, "pre")
    setupConfig(testcase.configs)
    print "running up2date as:\n%s" % cmd
    up2dateOut = runUp2date(cmd)
    saveUp2dateOut(up2dateOut,testcase.name)
    afterList = getRpmQAList()
    storeResults(afterList, testcase.name, "after")
    print "diff between before/after"
    compareBeforeAfter(beforeList, afterList)
    print "diff between results and expected results"
    try:
        ret = compareResults(testcase.results, afterList)
    # NOTE(review): string exceptions only work on very old Python 2;
    # they were removed in Python 2.6.
    except "NoResultsError":
        print "\n\nNo results listing (%s) was found for this test (%s)\n\n" % (testcase.results, testcase.name)
        ret = 1
    print ret
    if ret:
        # non-zero means the after-state did not match the expectation
        print "\n----------- This Case (%s) Failed --------------" % testcase.name
        print "cmd"
        print "dbname: %s results: %s" % (testcase.dbname, testcase.results)
        print "\n\n"
        logFailures(testcase.name)
#    restoreConfig()
def difflists(list1, list2):
    # Return a pair of lists: (items only in list1, items only in list2),
    # preserving each input's original order.
    only_in_first = [item for item in list1 if item not in list2]
    only_in_second = [item for item in list2 if item not in list1]
    return (only_in_first, only_in_second)
def compareBeforeAfter(before, after):
    # Print which packages appeared and disappeared between snapshots.
#    print before
#    print after
    deleted, added = difflists(before, after)
    print "added: %s" % added
    print "deleted: %s" % deleted
def compareResults(resultsName, afterList):
    # Compare the after-run package list with the expected listing stored
    # under RESULTSPATH.  Returns 0 on an exact match, 1 otherwise.
    # NOTE(review): raises a string exception, which only works on very
    # old Python 2 (removed in 2.6).  Also sorts afterList in place.
    #open the results file and read it in
    resultsFile = "%s/%s" % (RESULTSPATH, resultsName)
    if not os.access(resultsFile, os.R_OK):
        raise "NoResultsError"
    fd = open("%s/%s" % (RESULTSPATH, resultsName), "r")
    expected = fd.readlines()
    expected.sort()
    tmp = map(lambda a:a.strip(), expected)
    expected = tmp
    afterList.sort()
    deleted, added = difflists(afterList, expected)
    print "epected but not found: %s" % added
    print "not expected, but found: %s" % deleted
    if len(deleted) == 0 and len(added) == 0:
        return 0
    else:
        return 1
class Testcase:
    # Plain container for one parsed testcase line:
    # name, rpm db to use, config set, expected-results file, up2date options.
    def __init__(self, name=None, dbname=None,
                 configs=None, results=None, options=None):
        self.name = name
        self.dbname = dbname
        self.configs = configs
        self.options = options
        self.results = results

    def __repr__(self):
        # Bug fix: the original concatenated "results: %s" directly with
        # "options: %s " (no separating space), producing e.g.
        # "results: foooptions: bar ".  Emit all five fields uniformly.
        return "name: %s dbname: %s configs: %s results: %s options: %s" % (
            self.name, self.dbname, self.configs, self.results, self.options)
# Module-level registries filled by parsefile().
testcases = []
testcasenames = []

def parsefile(m):
    # Parse colon-separated testcase lines from file object m.
    # Format: name:dbname:configs:results:options
    # Blank lines and '#' comments are skipped; each parsed line appends a
    # Testcase to the module-level lists above.
    while True:
        raw = m.readline()
        if not len(raw):
            break
        # drop the trailing newline character
        entry = raw[:-1]
        if not entry:
            continue
        entry = entry.strip()
        if entry[0] == '#':
            continue
        fields = entry.split(':')
        case = Testcase(fields[0].strip(), fields[1].strip(),
                        fields[2].strip(), fields[3].strip(),
                        fields[4].strip())
        testcases.append(case)
        testcasenames.append(case.name)
def main():
    # Command-line driver: -f/--filename selects the testcase file,
    # -l/--list prints available cases, positional args are glob patterns
    # selecting which cases to run (all cases if the registry is empty).
    testcasename = None
    printlist = None
    opts, args = getopt.getopt(sys.argv[1:], "f:l",
                               ["filename=", "list"])
    for opt, val in opts:
        if opt == "--filename" or opt == "-f":
            filename = val
        if opt == "--list" or opt == "-l":
            printlist = 1
            print "Available test cases include:"
            print testcasenames
            for i in testcasenames:
                print "%s" % i
    #testcasename = args
    # NOTE(review): 'filename' is unbound here if -f/--filename was not
    # given -- running without it raises NameError/UnboundLocalError.
    fd = open(filename, "r")
    parsefile(fd)
    #print testcases
    if printlist:
        print "Available test cases include:"
        for i in testcasenames:
            print "%s" % i
        sys.exit()
    createDataDirs()
    # accept a list of test cases, including filename style globs
    if args:
        import fnmatch
        matchedtestcases = []
        for testcasename in args:
            for testcase in testcasenames:
                if fnmatch.fnmatch(testcase, testcasename):
                    matchedtestcases.append(testcase)
        #uniq the list
        tmp = {}
        for i in matchedtestcases:
            tmp[i] = i
        matchedtestcases = tmp.keys()
        matchedtestcases.sort()
        for testcase in matchedtestcases:
            print "going to run testcases: %s" % testcase
        for testcase in matchedtestcases:
            print "running testcase: %s" % testcase
            runTestcase(testcases[testcasenames.index(testcase)])
    if not testcasenames:
        for testcase in testcases:
            runTestcase(testcase)
    #else:
    #    print "running testcase: %s" % testcasename
    #    runTestcase(testcases[testcasenames.index(testcasename)])
| gpl-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-0.96/django/utils/translation/trans_null.py | 32 | 1167 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
def ngettext(singular, plural, number):
    """Null i18n pluralization: English rule only (singular iff number == 1)."""
    return singular if number == 1 else plural
# No-op stand-ins for the real translation machinery: gettext variants
# return their input unchanged, activation is a no-op, and language
# queries fall back to the settings module.
ngettext_lazy = ngettext
gettext = gettext_noop = gettext_lazy = _ = lambda x: x
string_concat = lambda *strings: ''.join([str(el) for el in strings])
activate = lambda x: None
deactivate = install = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
get_date_formats = lambda: (settings.DATE_FORMAT, settings.DATETIME_FORMAT, settings.TIME_FORMAT)
get_partial_date_formats = lambda: (settings.YEAR_MONTH_FORMAT, settings.MONTH_DAY_FORMAT)
check_for_language = lambda x: True
def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    lang, sep, country = language.partition('-')
    if sep:
        return lang.lower() + '_' + country.upper()
    return language.lower()
def get_language_from_request(request):
    # Null i18n: ignore the request entirely, always use the configured code.
    return settings.LANGUAGE_CODE
| mit |
bstrebel/PyUtils | test/test_options.py | 1 | 2293 | import os, sys, logging, logging.config, pyutils
from pyutils import Options, LogAdapter, get_logger, log_level
def main():
    # Exercise pyutils.Options: build an option dict, merge command-line
    # arguments and a config file, then poke at various access patterns.
    from ConfigParser import ConfigParser
    from argparse import ArgumentParser
    options = {
        'option': 'OPTION',
        'secrets': {'token': 'secret'},
        'loglevel': 'INFO'
    }
    # region Command line arguments
    parser = ArgumentParser(description='PySnc Engine Rev. 0.1 (c) Bernd Strebel')
    parser.add_argument('-c', '--config', type=str, help='use alternate configuration file')
    parser.add_argument('-l', '--loglevel', type=str,
                        choices=['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'CRITICAL',
                                 'debug', 'info', 'warn', 'warning', 'error', 'critical'],
                        help='debug log level')
    args = parser.parse_args()
    opts = Options(options, args, config=True)
    # _logger = get_logger('root',log_level(opts.loglevel))
    # logger = LogAdapter(_logger, {'package': 'init'})
    # logger.info("Console logger initilized")
    # endregion
    # region (I) Basic configuration and logger settings from config file
    # if opts.config_file:
    #     if os.path.isfile(opts.config_file):
    #         logging.config.fileConfig(opts.config_file)
    #         config = ConfigParser(options)
    #         config.read(opts.config_file)
    #     else:
    #         logger.critical('Configuration file %s not found!' % (opts.config_file))
    #         exit(1)
    # else:
    #     logger.critical("Missing configuration file!")
    #     exit(1)
    #
    # _logger = logging.getLogger()
    # _logger.setLevel(log_level(opts.loglevel))
    #
    # logger = LogAdapter(_logger, {'package': 'main'})
    # endregion
    # region (II) Basic configuration and logger settings from config parser
    config = opts.config_parser
    logger = LogAdapter(opts.logger, {'package': 'main'})
    # endregion
    logger.info('Default logger configured from %s' % (opts.config_file))
    print opts.option
    # The locals below only exercise the different Options access styles
    # (get with default, nested mapping, missing key, unicode values).
    s = opts.get('string_option', False)
    t = opts['secrets']['token']
    e = opts['empty']
    b = opts.get('uni')
    u = opts['uni']
    pass
    #f = Options.get_bool_value(opt)
# region __Main__
# Script entry point.
if __name__ == '__main__':
    main()
    exit(0)
# endregion
| gpl-2.0 |
I-TECH-UW/mwachx | contacts/migrations_old/0015_auto_20160419_1803.py | 2 | 1652 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Update the choice lists (and defaults) of several contact/visit
    CharFields; no columns are added or removed."""

    dependencies = [
        ('contacts', '0014_loss_date'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='condition',
            field=models.CharField(default=b'normal', max_length=15, choices=[(b'art', b'1 - Starting ART'), (b'adolescent', b'2 - Adolescent'), (b'first', b'3 - First Time Mother'), (b'normal', b'4 - Normal')]),
        ),
        migrations.AlterField(
            model_name='contact',
            name='delivery_source',
            field=models.CharField(blank=True, max_length=10, verbose_name=b'Delivery Notification Source', choices=[(b'phone', b'Phone'), (b'sms', b'SMS'), (b'visit', b'Clinic Visit'), (b'm2m', b'Mothers to Mothers'), (b'other', b'Other')]),
        ),
        migrations.AlterField(
            model_name='contact',
            name='status',
            field=models.CharField(default=b'pregnant', max_length=15, choices=[(b'pregnant', b'Pregnant'), (b'over', b'Post-Date'), (b'post', b'Post-Partum'), (b'ccc', b'CCC'), (b'completed', b'Completed'), (b'stopped', b'Withdrew'), (b'loss', b'SAE opt-in'), (b'sae', b'SAE opt-out'), (b'other', b'Admin Stop'), (b'quit', b'Left Study')]),
        ),
        migrations.AlterField(
            model_name='visit',
            name='visit_type',
            field=models.CharField(default=b'clinic', max_length=25, choices=[(b'clinic', b'Clinic Visit'), (b'study', b'Study Visit'), (b'both', b'Both'), (b'delivery', b'Delivery')]),
        ),
    ]
| apache-2.0 |
Scemoon/lpts | site-packages/pychart/afm/Helvetica_Oblique.py | 12 | 1496 | # AFM font Helvetica-Oblique (path: /usr/share/fonts/afms/adobe/phvro8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
# Register the AFM character-width table for Helvetica-Oblique in the
# shared font directory.  Generated data (see header) -- do not edit by hand.
dir.afm["Helvetica-Oblique"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 278, 355, 556, 556, 889, 667, 222, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667, 611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667, 667, 611, 278, 278, 278, 469, 556, 222, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500, 222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 333, 556, 556, 167, 556, 556, 556, 556, 191, 333, 556, 333, 333, 500, 500, 500, 556, 556, 556, 278, 500, 537, 350, 222, 333, 333, 556, 1000, 1000, 500, 611, 500, 333, 333, 333, 333, 333, 333, 333, 333, 500, 333, 333, 500, 333, 333, 333, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 1000, 500, 370, 500, 500, 500, 500, 556, 778, 1000, 365, 500, 500, 500, 500, 500, 889, 500, 500, 500, 278, 500, 500, 222, 611, 944, 611, )
| gpl-2.0 |
huongttlan/statsmodels | statsmodels/sandbox/formula.py | 27 | 22903 | """
Provides the basic classes needed to specify statistical models.
namespace : dictionary
mapping from names to data, used to associate data to a formula or term
"""
from statsmodels.compat.python import (iterkeys, lrange, callable, string_types,
itervalues, range)
import copy
import types
import numpy as np
__docformat__ = 'restructuredtext'

# Shared fallback namespace used by Term/Formula when none is assigned.
default_namespace = {}
class Term(object):
    """
    This class is very simple: it is just a named term in a model formula.

    It is also callable: by default it namespace[self.name], where namespace
    defaults to formula.default_namespace.
    When called in an instance of formula,
    the namespace used is that formula's namespace.

    Inheritance of the namespace under +,*,- operations:
    ----------------------------------------------------

    By default, the namespace is empty, which means it must be
    specified before evaluating the design matrix.

    When it is unambiguous, the namespaces of objects are derived from the
    context.

    Rules:
    ------

    i) "X * I", "X + I", "X**i": these inherit X's namespace
    ii) "F.main_effect()": this inherits the Factor F's namespace
    iii) "A-B": this inherits A's namespace
    iv) if A.namespace == B.namespace, then A+B inherits this namespace
    v) if A.namespace == B.namespace, then A*B inherits this namespace

    Equality of namespaces:
    -----------------------

    This is done by comparing the namespaces directly, if
    an exception is raised in the check of equality, they are
    assumed not to be equal.
    """
    def __pow__(self, power):
        """
        Raise the quantitative term's values to an integer power, i.e.
        polynomial.
        """
        try:
            power = float(power)
        except:
            raise ValueError('expecting a float')
        if power == int(power):
            name = '%s^%d' % (self.name, int(power))
        else:
            name = '%s^%0.2f' % (self.name, power)
        # the returned term evaluates self, then raises to 'power'
        value = Quantitative(name, func=self, transform=lambda x: np.power(x, power))
        value.power = power
        value.namespace = self.namespace
        return value
    def __init__(self, name, func=None, termname=None):
        # name: column name(s); termname: lookup key in the namespace
        self.name = name
        self.__namespace = None
        if termname is None:
            self.termname = name
        else:
            self.termname = termname
        if not isinstance(self.termname, string_types):
            raise ValueError('expecting a string for termname')
        if func:
            self.func = func
    # Namespace in which self.name will be looked up in, if needed
    def _get_namespace(self):
        if isinstance(self.__namespace, np.ndarray):
            return self.__namespace
        else: return self.__namespace or default_namespace
    def _set_namespace(self, value): self.__namespace = value
    def _del_namespace(self): del self.__namespace
    namespace = property(_get_namespace, _set_namespace, _del_namespace)
    def __str__(self):
        """
        '<term: %s>' % self.termname
        """
        return '<term: %s>' % self.termname
    def __add__(self, other):
        """
        Formula(self) + Formula(other)
        """
        fother = Formula(other, namespace=other.namespace)
        f = fother + self
        # rule (iv): shared namespaces propagate to the sum
        if _namespace_equal(fother.namespace, self.namespace):
            f.namespace = self.namespace
        return f
    def __mul__(self, other):
        """
        Formula(self) * Formula(other)
        """
        # NOTE(review): identity comparison `is 'intercept'` relies on
        # string interning; equality (==) would be the safe spelling.
        if isinstance(other, Term) and other.name is 'intercept':
            f = Formula(self, namespace=self.namespace)
        elif self.name is 'intercept':
            f = Formula(other, namespace=other.namespace)
        else:
            other = Formula(other, namespace=other.namespace)
            f = other * self
        if _namespace_equal(other.namespace, self.namespace):
            f.namespace = self.namespace
        return f
    def names(self):
        """
        Return the names of the columns in design associated to the terms,
        i.e. len(self.names()) = self().shape[0].
        """
        if isinstance(self.name, string_types):
            return [self.name]
        else:
            return list(self.name)
    def __call__(self, *args, **kw):
        """
        Return the columns associated to self in a design matrix.
        If the term has no 'func' attribute, it returns
        ``self.namespace[self.termname]``
        else, it returns
        ``self.func(*args, **kw)``
        """
        if not hasattr(self, 'func'):
            val = self.namespace[self.termname]
        else:
            val = self.func
        if callable(val):
            if isinstance(val, (Term, Formula)):
                # evaluate nested terms/formulas in our own namespace
                val = copy.copy(val)
                val.namespace = self.namespace
            val = val(*args, **kw)
        val = np.asarray(val)
        return np.squeeze(val)
class Factor(Term):
    """A categorical factor: one indicator column per key (or one ordinal
    column of key indices when ordinal=True)."""
    def __init__(self, termname, keys, ordinal=False):
        """
        Factor is initialized with keys, representing all valid
        levels of the factor.

        If ordinal is False, keys can have repeats: set(keys) is what is
        used.

        If ordinal is True, the order is taken from the keys, and
        there should be no repeats.
        """
        if not ordinal:
            self.keys = list(set(keys))
            self.keys.sort()
        else:
            self.keys = keys
            if len(set(keys)) != len(list(keys)):
                raise ValueError('keys for ordinal Factor should be unique, in increasing order')
        self._name = termname
        self.termname = termname
        self.ordinal = ordinal
        if self.ordinal:
            name = self.termname
        else:
            # one column name per level, e.g. '(color==red)'
            name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]
        Term.__init__(self, name, termname=self.termname, func=self.get_columns)
    def get_columns(self, *args, **kw):
        """
        Calling function for factor instance.
        """
        v = self.namespace[self._name]
        # unwrap callables (possibly nested terms/formulas) until we get data
        while True:
            if callable(v):
                if isinstance(v, (Term, Formula)):
                    v = copy.copy(v)
                    v.namespace = self.namespace
                v = v(*args, **kw)
            else: break
        n = len(v)
        if self.ordinal:
            # single column: index of each observation's key
            col = [float(self.keys.index(v[i])) for i in range(n)]
            return np.array(col)
        else:
            # one 0/1 indicator row per key
            value = []
            for key in self.keys:
                col = [float((v[i] == key)) for i in range(n)]
                value.append(col)
            return np.array(value)
    def values(self, *args, **kw):
        """
        Return the keys of the factor, rather than the columns of the design
        matrix.
        """
        # temporarily remove func so __call__ falls back to the raw data
        del(self.func)
        val = self(*args, **kw)
        self.func = self.get_columns
        return val
    def verify(self, values):
        """
        Verify that all values correspond to valid keys in self.
        """
        s = set(values)
        if not s.issubset(self.keys):
            raise ValueError('unknown keys in values')
    def __add__(self, other):
        """
        Formula(self) + Formula(other)

        When adding \'intercept\' to a factor, this just returns

        Formula(self, namespace=self.namespace)
        """
        # NOTE(review): `is 'intercept'` depends on string interning;
        # equality would be the safe spelling.
        if isinstance(other, Term) and other.name is 'intercept':
            return Formula(self, namespace=self.namespace)
        else:
            return Term.__add__(self, other)
    def main_effect(self, reference=None):
        """
        Return the 'main effect' columns of a factor, choosing
        an optional reference key.

        The reference key can be one of the keys of the Factor,
        or an integer, representing which column to remove.
        It defaults to 0.
        """
        names = self.names()
        if reference is None:
            reference = 0
        else:
            try:
                reference = self.keys.index(reference)
            except ValueError:
                reference = int(reference)
        # transform: subtract the reference column from every other column
        def maineffect_func(value, reference=reference):
            rvalue = []
            keep = lrange(value.shape[0])
            keep.pop(reference)
            for i in range(len(keep)):
                rvalue.append(value[keep[i]] - value[reference])
            return np.array(rvalue)
        keep = lrange(len(self.names()))
        keep.pop(reference)
        __names = self.names()
        _names = ['%s-%s' % (__names[keep[i]], __names[reference]) for i in range(len(keep))]
        value = Quantitative(_names, func=self,
                     termname='%s:maineffect' % self.termname,
                     transform=maineffect_func)
        value.namespace = self.namespace
        return value
    def __getitem__(self, key):
        """
        Retrieve the column corresponding to key in a Formula.

        :Parameters:
            key : one of the Factor's keys

        :Returns: ndarray corresponding to key, when evaluated in
            current namespace
        """
        if not self.ordinal:
            i = self.names().index('(%s==%s)' % (self.termname, str(key)))
            return self()[i]
        else:
            # NOTE(review): np.float is removed in modern numpy (>=1.24);
            # float / np.float64 is the current spelling.
            v = self.namespace[self._name]
            return np.array([(vv == key) for vv in v]).astype(np.float)
class Quantitative(Term):
    """
    A subclass of term that can be used to apply point transformations
    of another term, i.e. to take powers:

    >>> import numpy as np
    >>> from nipy.fixes.scipy.stats.models import formula
    >>> X = np.linspace(0,10,101)
    >>> x = formula.Term('X')
    >>> x.namespace={'X':X}
    >>> x2 = x**2
    >>> print np.allclose(x()**2, x2())
    True
    >>> x3 = formula.Quantitative('x2', func=x, transform=lambda x: x**2)
    >>> x3.namespace = x.namespace
    >>> print np.allclose(x()**2, x3())
    True
    """
    def __init__(self, name, func=None, termname=None, transform=lambda x: x):
        # transform is applied pointwise to the underlying term's values
        self.transform = transform
        Term.__init__(self, name, func=func, termname=termname)
    def __call__(self, *args, **kw):
        """
        A quantitative is just like term, except there is an additional
        transformation: self.transform.
        """
        return self.transform(Term.__call__(self, *args, **kw))
class Formula(object):
"""
A formula object for manipulating design matrices in regression models,
essentially consisting of a list of term instances.
The object supports addition and multiplication which correspond
to concatenation and pairwise multiplication, respectively,
of the columns of the two formulas.
"""
    # Namespace property: falls back to default_namespace when unset,
    # mirroring Term.namespace.
    def _get_namespace(self):
        if isinstance(self.__namespace, np.ndarray):
            return self.__namespace
        else: return self.__namespace or default_namespace
    def _set_namespace(self, value): self.__namespace = value
    def _del_namespace(self): del self.__namespace
    namespace = property(_get_namespace, _set_namespace, _del_namespace)
    def _terms_changed(self):
        # Refresh the cached column and term name lists after self.terms
        # is modified.
        self._names = self.names()
        self._termnames = self.termnames()
    def __init__(self, termlist, namespace=default_namespace):
        """
        Create a formula from either:

        i. a `formula` object

        ii. a sequence of `term` instances

        iii. one `term`
        """
        self.__namespace = namespace
        if isinstance(termlist, Formula):
            # shallow-copy the term list so the source formula is untouched
            self.terms = copy.copy(list(termlist.terms))
        elif isinstance(termlist, list):
            self.terms = termlist
        elif isinstance(termlist, Term):
            self.terms = [termlist]
        else:
            raise ValueError
        self._terms_changed()
    def __str__(self):
        """
        String representation of list of termnames of a formula.
        """
        value = []
        for term in self.terms:
            value += [term.termname]
        return '<formula: %s>' % ' + '.join(value)
    def __call__(self, *args, **kw):
        """
        Create (transpose) of the design matrix of the formula within
        namespace. Extra arguments are passed to each term instance. If
        the formula just contains an intercept, then the keyword
        argument 'nrow' indicates the number of rows (observations).
        """
        if 'namespace' in kw:
            namespace = kw['namespace']
        else:
            namespace = self.namespace
        allvals = []
        intercept = False
        iindex = 0
        for t in self.terms:
            # evaluate a copy of each term in this formula's namespace
            t = copy.copy(t)
            t.namespace = namespace
            val = t(*args, **kw)
            isintercept = False
            if hasattr(t, "termname"):
                if t.termname == 'intercept':
                    # placeholder: the intercept column is filled in below,
                    # once the number of observations is known
                    intercept = True
                    isintercept = True
                    interceptindex = iindex
                    allvals.append(None)
            if val.ndim == 1 and not isintercept:
                # promote 1-d term output to a single row
                val.shape = (1, val.shape[0])
                allvals.append(val)
            elif not isintercept:
                allvals.append(val)
            iindex += 1
        if not intercept:
            try:
                allvals = np.concatenate(allvals)
            except:
                # NOTE(review): bare except silently leaves allvals as a
                # list when concatenation fails
                pass
        else:
            nrow = kw.get('nrow', -1)
            if allvals != []:
                # infer row count from a neighbouring term's columns
                if interceptindex > 0:
                    n = allvals[0].shape[1]
                else:
                    n = allvals[1].shape[1]
                allvals[interceptindex] = np.ones((1,n), np.float64)
                allvals = np.concatenate(allvals)
            elif nrow <= 1:
                raise ValueError('with only intercept in formula, keyword \'nrow\' argument needed')
            else:
                # intercept-only formula: build the column directly
                allvals = I(nrow=nrow)
                allvals.shape = (1,) + allvals.shape
        return np.squeeze(allvals)
def hasterm(self, query_term):
"""
Determine whether a given term is in a formula.
"""
if not isinstance(query_term, Formula):
if isinstance(query_term, string_types):
try:
query = self[query_term]
return query.termname in self.termnames()
except:
return False
elif isinstance(query_term, Term):
return query_term.termname in self.termnames()
elif len(query_term.terms) == 1:
query_term = query_term.terms[0]
return query_term.termname in self.termnames()
else:
raise ValueError('more than one term passed to hasterm')
def __getitem__(self, name):
t = self.termnames()
if name in t:
return self.terms[t.index(name)]
else:
raise KeyError('formula has no such term: %s' % repr(name))
def termcolumns(self, query_term, dict=False):
"""
Return a list of the indices of all columns associated
to a given term.
"""
if self.hasterm(query_term):
names = query_term.names()
value = {}
for name in names:
value[name] = self._names.index(name)
else:
raise ValueError('term not in formula')
if dict:
return value
else:
return list(itervalues(value))
def names(self):
"""
Return a list of the names in the formula. The order of the
names corresponds to the order of the columns when self
is evaluated.
"""
allnames = []
for term in self.terms:
allnames += term.names()
return allnames
def termnames(self):
"""
Return a list of the term names in the formula. These
are the names of each term instance in self.
"""
names = []
for term in self.terms:
names += [term.termname]
return names
def design(self, *args, **kw):
"""
``transpose(self(*args, **kw))``
"""
return self(*args, **kw).T
def __mul__(self, other, nested=False):
"""
This returns a formula whose columns are the pairwise
product of the columns of self and other.
TO DO: check for nesting relationship. Should not be too difficult.
"""
other = Formula(other)
selftermnames = self.termnames()
othertermnames = other.termnames()
I = len(selftermnames)
J = len(othertermnames)
terms = []
termnames = []
for i in range(I):
for j in range(J):
termname = '%s*%s' % (str(selftermnames[i]), str(othertermnames[j]))
pieces = sorted(termname.split('*'))
termname = '*'.join(pieces)
termnames.append(termname)
selfnames = self.terms[i].names()
othernames = other.terms[j].names()
if self.terms[i].name is 'intercept':
_term = other.terms[j]
_term.namespace = other.namespace
elif other.terms[j].name is 'intercept':
_term = self.terms[i]
_term.namespace = self.namespace
else:
names = []
d1 = len(selfnames)
d2 = len(othernames)
for r in range(d1):
for s in range(d2):
name = '%s*%s' % (str(selfnames[r]), str(othernames[s]))
pieces = sorted(name.split('*'))
name = '*'.join(pieces)
names.append(name)
def product_func(value, d1=d1, d2=d2):
out = []
for r in range(d1):
for s in range(d2):
out.append(value[r] * value[d1+s])
return np.array(out)
cself = copy.copy(self.terms[i])
cother = copy.copy(other.terms[j])
sumterms = cself + cother
sumterms.terms = [cself, cother] # enforce the order we want
_term = Quantitative(names, func=sumterms,
termname=termname,
transform=product_func)
if _namespace_equal(self.namespace, other.namespace):
_term.namespace = self.namespace
terms.append(_term)
return Formula(terms)
def __add__(self, other):
"""
Return a formula whose columns are the
concatenation of the columns of self and other.
terms in the formula are sorted alphabetically.
"""
other = Formula(other)
terms = self.terms + other.terms
pieces = sorted([(term.name, term) for term in terms])
terms = [piece[1] for piece in pieces]
f = Formula(terms)
if _namespace_equal(self.namespace, other.namespace):
f.namespace = self.namespace
return f
def __sub__(self, other):
"""
Return a formula with all terms in other removed from self.
If other contains term instances not in formula, this
function does not raise an exception.
"""
other = Formula(other)
terms = copy.copy(self.terms)
for term in other.terms:
for i in range(len(terms)):
if terms[i].termname == term.termname:
terms.pop(i)
break
f = Formula(terms)
f.namespace = self.namespace
return f
def isnested(A, B, namespace=None):
    """
    Crude, namespace-dependent test of whether factor B is nested within
    factor A (or vice versa).

    Returns (True, F) when nested, where F is the finer of the two factors;
    otherwise returns (False, None).
    """
    if namespace is not None:
        A = copy.copy(A)
        A.namespace = namespace
        B = copy.copy(B)
        B.namespace = namespace
    a_vals = A(values=True)[0]
    b_vals = B(values=True)[0]
    if len(a_vals) != len(b_vals):
        raise ValueError('A() and B() should be sequences of the same length')
    n_a = len(set(a_vals))
    n_b = len(set(b_vals))
    finest = max(n_a, n_b)
    # Count distinct (a, b) pairs: nesting holds exactly when pairing adds
    # no new levels beyond the finer factor alone.
    n_pairs = len(set(zip(a_vals, b_vals)))
    if n_pairs != finest:
        return (False, None)
    return (True, A if n_a > n_b else B)
def _intercept_fn(nrow=1, **extra):
return np.ones((1,nrow))
# The canonical intercept term shared by all formulas; its docstring is
# attached after construction because Term instances have no class docstring.
I = Term('intercept', func=_intercept_fn)
I.__doc__ = """
Intercept term in a formula. If intercept is the
only term in the formula, then a keyword argument
\'nrow\' is needed.
>>> from nipy.fixes.scipy.stats.models.formula import Formula, I
>>> I()
array(1.0)
>>> I(nrow=5)
array([ 1., 1., 1., 1., 1.])
>>> f=Formula(I)
>>> f(nrow=5)
array([1, 1, 1, 1, 1])
"""
def interactions(terms, order=[1,2]):
    """
    Output all pairwise interactions of given order of a
    sequence of terms.
    The argument order is a sequence specifying which order
    of interactions should be generated -- the default
    creates main effects and two-way interactions. If order
    is an integer, it is changed to range(1,order+1), so
    order=3 is equivalent to order=[1,2,3], generating
    all one, two and three-way interactions.
    If any entry of order is greater than len(terms), it is
    effectively treated as len(terms).
    >>> print interactions([Term(l) for l in ['a', 'b', 'c']])
    <formula: a*b + a*c + b*c + a + b + c>
    >>>
    >>> print interactions([Term(l) for l in ['a', 'b', 'c']], order=list(range(5)))
    <formula: a*b + a*b*c + a*c + b*c + a + b + c>
    >>>
    """
    l = len(terms)
    values = {}
    # A scalar order is promoted to the list [1, ..., order].
    if np.asarray(order).shape == ():
        order = lrange(1, int(order)+1)
    # For each requested order o, enumerate all o-tuples of term indices.
    for o in order:
        I = np.indices((l,)*(o))
        I.shape = (I.shape[0], np.product(I.shape[1:]))
        for m in range(I.shape[1]):
            # only keep combinations that have unique, sorted entries,
            # so each interaction is generated exactly once
            if (np.unique(I[:,m]).shape == I[:,m].shape and
                np.alltrue(np.equal(np.sort(I[:,m]), I[:,m]))):
                ll = [terms[j] for j in I[:,m]]
                v = ll[0]
                # Multiply the selected terms into one interaction term.
                for ii in range(len(ll)-1):
                    v *= ll[ii+1]
                values[tuple(I[:,m])] = v
    # Sum all generated terms into a single formula.
    key = list(iterkeys(values))[0]
    value = values[key]; del(values[key])
    for v in itervalues(values):
        value += v
    return value
def _namespace_equal(space1, space2):
return space1 is space2
| bsd-3-clause |
freeJim/monserver | tools/examples/python/mongrel2/config/model.py | 92 | 7490 | from storm.locals import *
# Module-level Storm handles, initialised lazily by load_db().
database = None
store = None
# Every configuration table wiped by clear_db(); order is not significant.
TABLES = ["server", "host", "route", "proxy", "directory", "handler",
          "setting"]
def load_db(spec):
    """Open (once) and return the global Storm store for database `spec`."""
    global database
    global store
    # Only the first call creates the database; later calls reuse it.
    if not store:
        database = create_database(spec)
        store = Store(database)
    return store
def clear_db():
    """Delete every row from all known configuration tables."""
    for table in TABLES:
        store.execute("DELETE FROM %s" % table)
def begin(config_db, clear=False):
    """Open the sqlite config database, optionally wiping it first."""
    store = load_db("sqlite:" + config_db)
    # Remember whether the caller asked for a clean slate; commit() checks it.
    store.mongrel2_clear=clear
    if clear:
        clear_db()
    return store
def commit(servers, settings=None):
    """Persist the servers (with their hosts and routes) plus any settings."""
    for server in servers:
        store.add(server)
        for host in server.hosts:
            # Wire up the back-references before adding to the store.
            host.server = server
            store.add(host)
            for route in host.routes:
                route.host = host
                store.add(route)
    # Refuse to commit over existing data unless begin(clear=True) was used.
    if store.mongrel2_clear:
        store.commit()
    else:
        print "Results won't be committed unless you begin(clear=True)."
    if settings:
        for k,v in settings.items():
            store.add(Setting(unicode(k), unicode(v)))
        store.commit()
class Server(object):
    """One mongrel2 server row: logs, chroot, bind address/port, hosts."""
    __storm_table__ = "server"
    id = Int(primary = True)
    uuid = Unicode()
    access_log = Unicode()
    error_log = Unicode()
    chroot = Unicode()
    default_host = Unicode()
    name = Unicode()
    pid_file = Unicode()
    port = Int()
    bind_addr = Unicode(default=unicode('0.0.0.0'))
    use_ssl = Bool(default = 0)
    def __init__(self, uuid=None, access_log=None, error_log=None,
                 chroot=None, default_host=None, name=None, pid_file=None,
                 port=None, hosts=None, bind_addr='0.0.0.0', use_ssl=False):
        """Coerce all string fields to unicode (Storm requires it)."""
        super(Server, self).__init__()
        self.uuid = unicode(uuid)
        self.access_log = unicode(access_log)
        self.error_log = unicode(error_log)
        self.chroot = unicode(chroot)
        self.default_host = unicode(default_host)
        # The server name falls back to the default host when not given.
        self.name = unicode(name) if name else self.default_host
        self.pid_file = unicode(pid_file)
        self.port = port
        self.bind_addr = unicode(bind_addr)
        self.use_ssl = use_ssl
        for h in hosts or []:
            self.hosts.add(h)
    def __repr__(self):
        return "Server(uuid=%r, access_log=%r, error_log=%r, chroot=%r, default_host=%r, port=%d)" % (
            self.uuid, self.access_log, self.error_log,
            self.chroot, self.default_host, self.port)
class Host(object):
    """A virtual host belonging to a Server; owns a set of routes."""
    __storm_table__ = "host"
    id = Int(primary = True)
    server_id = Int()
    server = Reference(server_id, Server.id)
    maintenance = Bool(default = 0)
    name = Unicode()
    matching = Unicode()
    def __init__(self, server=None, name=None, matching=None,
                 maintenance=False, routes=None):
        super(Host, self).__init__()
        self.server = server
        self.name = unicode(name)
        # The match pattern falls back to the host name when not given.
        self.matching = matching or self.name
        self.maintenance = maintenance
        if routes:
            # routes maps url path -> target (a Dir, Handler or Proxy).
            for p,t in routes.items():
                self.routes.add(Route(path=p, target=t))
    def __repr__(self):
        return "Host(maintenance=%d, name=%r, matching=%r)" % (
            self.maintenance, self.name, self.matching)
# Reverse relation: all hosts owned by a given server.
Server.hosts = ReferenceSet(Server.id, Host.server_id)
class Handler(object):
    """A ZeroMQ handler target: send/recv socket specs and identities."""
    __storm_table__ = "handler"
    id = Int(primary = True)
    send_spec = Unicode()
    send_ident = Unicode()
    recv_spec = Unicode()
    recv_ident = Unicode()
    raw_payload = Bool(default = 0)
    protocol = Unicode(default = unicode('json'))
    def __init__(self, send_spec, send_ident, recv_spec, recv_ident,
                 raw_payload=False, protocol='json'):
        super(Handler, self).__init__()
        self.send_spec = unicode(send_spec)
        self.send_ident = unicode(send_ident)
        self.recv_spec = unicode(recv_spec)
        self.recv_ident = unicode(recv_ident)
        self.raw_payload = raw_payload
        self.protocol = unicode(protocol)
    def __repr__(self):
        return "Handler(send_spec=%r, send_ident=%r, recv_spec=%r, recv_ident=%r)" % (
            self.send_spec, self.send_ident, self.recv_spec,
            self.recv_ident)
class Proxy(object):
    """A reverse-proxy target: forward matching requests to addr:port."""
    __storm_table__ = "proxy"
    id = Int(primary = True)
    addr = Unicode()
    port = Int()
    def __init__(self, addr, port):
        super(Proxy, self).__init__()
        self.addr = unicode(addr)
        self.port = port
    def __repr__(self):
        return "Proxy(addr=%r, port=%d)" % (
            self.addr, self.port)
class Dir(object):
    """A static-directory target: serve files from `base` with caching."""
    __storm_table__ = "directory"
    id = Int(primary = True)
    base = Unicode()
    index_file = Unicode()
    default_ctype = Unicode()
    cache_ttl = Int(default=0)
    def __init__(self, base, index_file, default_ctype="text/plain", cache_ttl=0):
        super(Dir, self).__init__()
        self.base = unicode(base)
        self.index_file = unicode(index_file)
        self.default_ctype = unicode(default_ctype)
        self.cache_ttl = cache_ttl
    def __repr__(self):
        return "Dir(base=%r, index_file=%r, default_ctype=%r)" % (
            self.base, self.index_file, self.default_ctype)
class Route(object):
    """Maps a URL path of a Host to a target (Dir, Handler or Proxy).

    The target is stored polymorphically as (target_id, target_type) since
    Storm has no built-in polymorphic reference.
    """
    __storm_table__ = "route"
    id = Int(primary = True)
    path = Unicode()
    reversed = Bool(default = 0)
    host_id = Int()
    host = Reference(host_id, Host.id)
    target_id = Int()
    target_type = Unicode()
    # Lowercased class name -> target class, used to resolve target_type.
    _targets = {'dir': Dir,
                'handler': Handler,
                'proxy': Proxy}
    def __init__(self, path=None, reversed=False, host=None, target=None):
        super(Route, self).__init__()
        self.path = unicode(path)
        self.reversed = reversed
        self.host = host
        if target:
            # Commit immediately so the target row gets its id assigned.
            store.add(target)
            store.commit()
            self.target_id = target.id
            self.target_type = unicode(target.__class__.__name__.lower())
    def target_class(self):
        """Return the target's Python class from its stored type tag."""
        return self._targets[self.target_type]
    @property
    def target(self):
        """Look up and return the referenced target row, or None."""
        kls = self.target_class()
        targets = store.find(kls, kls.id == self.target_id)
        assert targets.count() <= 1, "Routes should only map to one target."
        return targets[0] if targets.count() else None
    def __repr__(self):
        return "Route(path=%r, reversed=%r, target=%r)" % (
            self.path, self.reversed, self.target)
# Reverse relation: all routes owned by a given host.
Host.routes = ReferenceSet(Host.id, Route.host_id)
class Log(object):
    """Read-only view of the audit log table written by the m2sh tools."""
    __storm_table__ = "log"
    id = Int(primary = True)
    who = Unicode()
    what = Unicode()
    happened_at = DateTime()
    location = Unicode()
    how = Unicode()
    why = Unicode()
    def __repr__(self):
        return "[%s, %s@%s, %s] %s" % (
            self.happened_at.isoformat(), self.who, self.location, self.what,
            self.why)
class MIMEType(object):
    """A file-extension to MIME-type mapping row."""
    __storm_table__ = "mimetype"
    id = Int(primary = True)
    mimetype = Unicode()
    extension = Unicode()
    def __repr__(self):
        return "MIMEType(mimetype=%r, extension=%r)" % (
            self.mimetype, self.extension)
class Setting(object):
    """A key/value configuration setting; commit() wraps values in unicode."""
    __storm_table__ = "setting"
    id = Int(primary = True)
    key = Unicode()
    value = Unicode()
    def __init__(self, key, value):
        super(Setting, self).__init__()
        self.key = key
        self.value = value
    def __repr__(self):
        return "Setting(key=%r, value=%r)" % (self.key, self.value)
| bsd-3-clause |
Rocamadour7/ml_tutorial | 05. Clustering/titanic-data-example.py | 1 | 1721 | import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
# Load the passenger data; drop the body-id and free-text name columns,
# then zero-fill the remaining missing values.
df = pd.read_excel('titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
    """
    Replace every non-numeric column of `df`, in place, with integer codes.

    Each distinct value in a column is assigned its own small integer; the
    assignment order follows set iteration order, exactly as before.
    """
    for column in df.columns.values:
        # Numeric columns are left untouched.
        if df[column].dtype == np.int64 or df[column].dtype == np.float64:
            continue
        distinct = set(df[column].values.tolist())
        mapping = {element: code for code, element in enumerate(distinct)}
        df[column] = [mapping[element] for element in df[column]]
    return df
df = handle_non_numerical_data(df)
# Features: everything but the label, standardised to zero mean / unit var.
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = KMeans(n_clusters=2)
clf.fit(X)
# Compare cluster ids against survival labels. Cluster labels are arbitrary,
# so the printed accuracy can come out as either p or 1 - p.
correct = 0
for i in range(len(X)):
    predict_me = np.array(X[i].astype(float))
    predict_me = predict_me.reshape(-1, len(predict_me))
    prediction = clf.predict(predict_me)
    if prediction[0] == y[i]:
        correct += 1
print(correct/len(X))
| mit |
scudre/alarm-central-station-receiver | alarm_central_station_receiver/contact_id/callup.py | 1 | 3225 | """
Copyright (2018) Chris Scuderi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
from alarm_central_station_receiver.contact_id import handshake
def calc_checksum(code):
    """Contact ID checksum: hex digits summed ('0' counts as 10), mod 15."""
    total = sum(10 if digit == '0' else int(digit, 16) for digit in code)
    return total % 15
def parse_alarm_codes(code_str):
    """Split a DTMF digit stream into Contact ID codes.

    Returns a list of (code, checksum_ok) tuples. The regex captures
    4-digit account + message type 18x + 8 data digits, with an optional
    checksum digit; the lookahead stops one code bleeding into the next.
    """
    pattern = "([0-9]{4}18[136][0-9abcdef]{8}[0-9abcdef]?(?![0-9]{3}18[136]))"
    codes = []
    for code in re.split(pattern, code_str):
        if not code:
            continue
        # There seems to be some buggyness with either TigerJet or the alarm
        # system when sending the last checksum digit when its above 'c', so
        # a 15-digit code (missing checksum) gets it reconstructed here.
        if len(code) == 15:
            # XXX hack - Tigerjet can't detect the highest DTMF code of 15
            if calc_checksum(code) == 0:
                code += 'f'
            # XXX hack - Tigerjet can't detect the high DTMF code of 14
            if calc_checksum(code) == 1:
                code += 'e'
            if calc_checksum(code) == 2:
                code += 'd'
        codes.append((code, calc_checksum(code) == 0))
    return codes
def collect_alarm_codes(fd):
    """Acknowledge the alarm and read its DTMF digits until it hangs up.

    Returns the raw hex-digit string; -1 status reads (no digit pending)
    contribute nothing.
    """
    logging.info("Collecting Alarm Codes")
    code_str = ''
    # Play the alarm handshake to start getting the codes
    with handshake.Handshake():
        off_hook, digit = get_phone_status(fd)
        while off_hook:
            code_str += format(digit, 'x') if digit != -1 else ''
            off_hook, digit = get_phone_status(fd)
        logging.info("Alarm Hung Up")
    logging.info('Code String: %s', code_str)
    return code_str
def validate_alarm_call_in(fd, expected):
    """Watch the line for the alarm dialing `expected`.

    Maintains a sliding window of the last three dialed digits and returns
    True only when it matches `expected` while the line is still off hook.
    """
    number = '000'
    off_hook, digit = get_phone_status(fd)
    if off_hook:
        logging.info("Phone Off The Hook")
    while off_hook:
        if digit != -1:
            logging.debug("Digit %d", digit)
            # Slide the new digit into the 3-digit window.
            number = number[1:] + format(digit, 'x')
            logging.debug("Number %s", number)
            if number == expected:
                logging.info("Alarm Call In Received")
                break
        off_hook, digit = get_phone_status(fd)
    logging.debug("Number %s", number)
    if not off_hook:
        logging.info("Phone On The Hook")
    return number == expected and off_hook
return number == expected and off_hook
def get_phone_status(fd):
status = bytearray(fd.read(2))
digit = status[0]
if digit < 11:
digit = digit - 1
off_hook = ((status[1] & 0x80) == 0x80)
return (off_hook, digit)
def handle_alarm_calling(fd, number):
    """Answer an alarm call-in to `number` and return its parsed codes.

    Returns an empty list if the call-in never validates.
    """
    codes = []
    if validate_alarm_call_in(fd, number):
        code_str = collect_alarm_codes(fd)
        codes = parse_alarm_codes(code_str)
    return codes
| apache-2.0 |
zombi-x/android_kernel_htc_m7 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
# Accept [comm] [interval], a lone [interval], or a lone [comm].
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # A non-numeric single argument is a command name, not an interval.
        for_comm = sys.argv[1]
        interval = default_interval
# Per-syscall-id hit counters, filled by raw_syscalls__sys_enter().
syscalls = autodict()
def trace_begin():
    # Print totals from a background thread so event processing keeps going.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Count one syscall entry, optionally filtered to a single command."""
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not an int yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever in a background thread: clear the terminal, print the
    # counters sorted by count (descending), reset them, then sleep.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name can fail for unknown ids; skip the row.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
google/tangent | tests/test_optimization.py | 1 | 2734 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gast
import pytest
from tangent import optimization
from tangent import quoting
def test_assignment_propagation():
    """Propagating y = x through z = y should drop one assignment."""
    def f(x):
        y = x
        z = y
        return z
    node = quoting.parse_function(f)
    node = optimization.assignment_propagation(node)
    assert len(node.body[0].body) == 2
def test_dce():
    """Dead code elimination should remove the unused `y = 2 * x`."""
    def f(x):
        y = 2 * x
        return x
    node = quoting.parse_function(f)
    node = optimization.dead_code_elimination(node)
    assert isinstance(node.body[0].body[0], gast.Return)
def test_fixed_point():
    """optimize() should iterate until both dead assignments are gone."""
    def f(x):
        # g and h are never resolved; the function is only parsed, not run.
        y = g(x)
        z = h(y)
        return x
    node = quoting.parse_function(f)
    node = optimization.optimize(node)
    assert isinstance(node.body[0].body[0], gast.Return)
def test_constant_folding():
    """Each arithmetic identity in f should fold to the matching f_opt line."""
    def f(x):
        x = 1 * x
        x = 0 * x
        x = x * 1
        x = x * 0
        x = x * 2
        x = 2 * x
        x = 2 * 3
        x = 1 + x
        x = 0 + x
        x = x + 1
        x = x + 0
        x = x + 2
        x = 2 + x
        x = 2 + 3
        x = 1 - x
        x = 0 - x
        x = x - 1
        x = x - 0
        x = x - 2
        x = 2 - x
        x = 2 - 3
        x = 1 / x
        x = 0 / x
        x = x / 1
        x = x / 0
        x = x / 2
        x = 2 / x
        x = 2 / 8
        x = 1 ** x
        x = 0 ** x
        x = x ** 1
        x = x ** 0
        x = x ** 2
        x = 2 ** x
        x = 2 ** 3
    def f_opt(x):
        x = x
        x = 0
        x = x
        x = 0
        x = x * 2
        x = 2 * x
        x = 6
        x = 1 + x
        x = x
        x = x + 1
        x = x
        x = x + 2
        x = 2 + x
        x = 5
        x = 1 - x
        x = -x
        x = x - 1
        x = x
        x = x - 2
        x = 2 - x
        x = -1
        x = 1 / x
        x = 0 / x
        x = x
        x = x / 0
        x = x / 2
        x = 2 / x
        x = 0.25
        x = 1
        x = 0
        x = x
        x = 1
        x = x ** 2
        x = 2 ** x
        x = 8
    node = quoting.parse_function(f)
    node = optimization.constant_folding(node)
    node_opt = quoting.parse_function(f_opt)
    # Compare the generated source line by line, skipping the def line.
    lines = quoting.to_source(node).strip().split('\n')[1:]
    lines_opt = quoting.to_source(node_opt).strip().split('\n')[1:]
    # In Python 2 integer division could be on, in which case...
    if 1 / 2 == 0:
        lines_opt[27] = '    x = 0'
    assert lines == lines_opt
if __name__ == '__main__':
    # Running the file directly executes the tests; fail loudly on error.
    assert not pytest.main([__file__])
| apache-2.0 |
gavioto/rethinkdb | external/v8_3.30.33.16/buildtools/clang_format/script/clang-format-sublime.py | 164 | 2440 | # This file is a minimal clang-format sublime-integration. To install:
# - Change 'binary' if clang-format is not on the path (see below).
# - Put this file into your sublime Packages directory, e.g. on Linux:
# ~/.config/sublime-text-2/Packages/User/clang-format-sublime.py
# - Add a key binding:
# { "keys": ["ctrl+shift+c"], "command": "clang_format" },
#
# With this integration you can press the bound key and clang-format will
# format the current lines and selections for all cursor positions. The lines
# or regions are extended to the next bigger syntactic entities.
#
# It operates on the current, potentially unsaved buffer and does not create
# or save any files. To revert a formatting, just undo.
from __future__ import print_function
import sublime
import sublime_plugin
import subprocess
# Module-level knobs for the plugin:
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
# Change this to format according to other formatting styles. See the output of
# 'clang-format --help' for a list of supported styles. The default looks for
# a '.clang-format' or '_clang-format' file to indicate the style that should be
# used.
style = 'file'
class ClangFormatCommand(sublime_plugin.TextCommand):
    """Sublime text command: clang-format all current selections in place."""
    def run(self, edit):
        encoding = self.view.encoding()
        if encoding == 'Undefined':
            encoding = 'utf-8'
        regions = []
        command = [binary, '-style', style]
        # One -offset/-length pair per cursor/selection; clang-format extends
        # each range to the nearest bigger syntactic entity.
        for region in self.view.sel():
            regions.append(region)
            region_offset = min(region.a, region.b)
            region_length = abs(region.b - region.a)
            command.extend(['-offset', str(region_offset),
                            '-length', str(region_length),
                            '-assume-filename', str(self.view.file_name())])
        old_viewport_position = self.view.viewport_position()
        buf = self.view.substr(sublime.Region(0, self.view.size()))
        # Feed the whole (possibly unsaved) buffer through clang-format.
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        output, error = p.communicate(buf.encode(encoding))
        if error:
            print(error)
        # Replace the buffer with the formatted output (a single undo step).
        self.view.replace(
            edit, sublime.Region(0, self.view.size()),
            output.decode(encoding))
        self.view.sel().clear()
        for region in regions:
            self.view.sel().add(region)
        # FIXME: Without the 10ms delay, the viewport sometimes jumps.
        sublime.set_timeout(lambda: self.view.set_viewport_position(
            old_viewport_position, False), 10)
| agpl-3.0 |
ucbrise/clipper | monitoring/front_end_exporter.py | 2 | 2298 | import requests
from flatten_json import flatten
import itertools
import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
import argparse
import logging
# CLI: the query frontend's container name is required so the exporter
# knows which host to scrape inside the clipper docker network.
parser = argparse.ArgumentParser(
    description='Spin up a node exporter for query_frontend.')
parser.add_argument(
    '--query_frontend_name',
    metavar='str',
    type=str,
    required=True,
    help='The name of docker container in clipper_network')
args = parser.parse_args()
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%y-%m-%d:%H:%M:%S',
    level=logging.INFO)
logger = logging.getLogger(__name__)
query_frontend_id = args.query_frontend_name
# Metrics endpoint exposed by the query frontend container.
ADDRESS = 'http://{}/metrics'.format(query_frontend_id)
logger.info("Scraping {}".format(ADDRESS))
def load_metric():
    """Fetch the frontend's JSON metrics; return {} if the scrape fails."""
    try:
        res = requests.get(ADDRESS)
        return res.json()
    except Exception as e:
        # A failed scrape is logged but never fatal for the exporter loop.
        logger.warning("Scrape Failed! Error: {}\n".format(e))
        return dict()
def multi_dict_unpacking(lst):
    """
    Receive a list of dictionaries, join them into one big dictionary.

    Later dictionaries win on duplicate keys, preserving the order of `lst`.
    """
    result = {}
    for d in lst:
        # dict.update does the same key-by-key copy as the original inner
        # loop, but in C and without the hand-rolled iteration.
        result.update(d)
    return result
def parse_metric(metrics):
    """Flatten the nested metrics document into one flat name -> value dict."""
    if len(metrics) == 0:
        # Return empty dictionary if it's empty
        return metrics
    # Drop the top-level type grouping, flatten each entry's nesting into
    # underscore-joined keys, then merge everything into a single dict.
    wo_type = list(itertools.chain.from_iterable(metrics.values()))
    wo_type_flattened = list(itertools.chain([flatten(d) for d in wo_type]))
    wo_type_joined = multi_dict_unpacking(wo_type_flattened)
    return wo_type_joined
class ClipperCollector(object):
    """Prometheus custom collector exposing Clipper query-frontend metrics."""
    def __init__(self):
        pass

    def collect(self):
        """Yield one GaugeMetricFamily per scraped metric entry."""
        metrics = parse_metric(load_metric())
        for name, val in metrics.items():
            try:
                # Bug fix: the original condition was `if '.' or 'e' in val`,
                # which is always true ('.' is a truthy constant), so every
                # value was coerced to float and the int branch was dead.
                sval = str(val)
                if '.' in sval or 'e' in sval:
                    val = float(sval)
                else:
                    val = int(sval)
                # Prometheus metric names may not contain ':' or '-'.
                name = name.replace(':', '_').replace('-', '_')
                yield GaugeMetricFamily(name, 'help', value=val)
            except ValueError:
                # Non-numeric values are silently skipped, as before.
                pass
if __name__ == '__main__':
    # Register the collector, serve the metrics on port 1390, idle forever.
    REGISTRY.register(ClipperCollector())
    start_http_server(1390)
    while True:
        time.sleep(1)
| apache-2.0 |
ntonjeta/iidea-Docker | examples/sobel/src/boost_1_63_0/libs/python/config/__init__.py | 5 | 5259 | #
# Copyright (c) 2016 Stefan Seefeld
# All rights reserved.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from SCons.Variables import *
from SCons.Script import AddOption
from collections import OrderedDict
import platform
from . import ui
from . import cxx
from . import python
from . import numpy
from . import boost
def add_options(vars):
    """Register all command-line options and SCons build variables."""
    ui.add_option('-V', '--verbose', dest='verbose', action='store_true', help='verbose mode: print full commands.')
    ui.add_option('--no-numpy', dest='numpy', action='store_false', help='do not attempt to build NumPy bindings.')
    # Let each subsystem register its own variables.
    python.add_options(vars)
    numpy.add_options(vars)
    boost.add_options(vars)
    # Toolchain and flag variables; list-valued ones are split on whitespace.
    vars.Add('CXX')
    vars.Add('CPPPATH', converter=lambda v:v.split())
    vars.Add('CCFLAGS', converter=lambda v:v.split())
    vars.Add('CXXFLAGS', converter=lambda v:v.split())
    vars.Add('LIBPATH', converter=lambda v:v.split())
    vars.Add('LIBS', converter=lambda v:v.split())
    vars.Add('PYTHON')
    vars.Add('PYTHONLIBS')
    vars.Add('prefix')
    vars.Add('boostbook_prefix')
    vars.Add('CXX11')
    vars.Add('NUMPY')
    vars.Add('NUMPY_CPPPATH', converter=lambda v:v.split())
    ui.add_variable(vars, ("arch", "target architeture", platform.machine()))
    ui.add_variable(vars, ("toolchain", "toolchain to use", 'gcc'))
    ui.add_variable(vars, ListVariable("variant", "Build configuration", "release", ["release", "debug", "profile"]))
    ui.add_variable(vars, ListVariable("link", "Library linking", "dynamic", ["static", "dynamic"]))
    ui.add_variable(vars, ListVariable("threading", "Multi-threading support", "multi", ["single", "multi"]))
    ui.add_variable(vars, EnumVariable("layout", "Layout of library names and header locations", "versioned", ["versioned", "system"]))
    ui.add_variable(vars, PathVariable("stagedir", "If --stage is passed install only compiled library files in this location", "stage", PathVariable.PathAccept))
    ui.add_variable(vars, PathVariable("prefix", "Install prefix", "/usr/local", PathVariable.PathAccept))
def get_checks(env):
    """Return the ordered configure checks; NumPy is skipped by --no-numpy."""
    checks = OrderedDict()
    checks['cxx'] = cxx.check
    checks['python'] = python.check
    if env.GetOption('numpy') is not False:
        checks['numpy'] = numpy.check
    else:
        # Record the opt-out so later build steps know NumPy is unavailable.
        env['NUMPY'] = False
    checks['boost'] = boost.check
    return checks
def set_property(env, **kw):
    """Apply toolchain feature settings (optimize, debug, ...) to `env`.

    Each keyword is dispatched to the matching feature hook of the active
    toolchain and mirrored into the environment as an upper-case key.
    """
    from toolchains.gcc import features as gcc_features
    from toolchains.msvc import features as msvc_features
    if 'gcc' in env['TOOLS']: features = gcc_features
    elif 'msvc' in env['TOOLS']: features = msvc_features
    # Bug fix: the original raised `Error`, an undefined name, which would
    # have surfaced as a NameError instead of a meaningful exception.
    else: raise ValueError('unknown toolchain')
    features.init_once(env)
    for (prop,value) in kw.items():
        # Properties with no matching hook fall through to a no-op callable.
        getattr(features, prop, lambda x, y : None)(env, value)
        env[prop.upper()] = value
def boost_suffix(env):
    """Build the library-name suffix: toolchain, threading, debug, version."""
    parts = []
    if env["layout"] == "versioned":
        if "gcc" in env["TOOLS"]:
            # Collapse a dotted version such as "4.8.1" down to "48".
            short_version = "".join(env["CXXVERSION"].split(".")[0:2])
            if env['CXX'] in ('clang', 'clang++'):
                parts.append("-clang" + short_version)
            else:  # assume g++
                parts.append("-gcc" + short_version)
    if env["THREADING"] == "multi":
        parts.append("-mt")
    if env["DEBUG"]:
        parts.append("-d")
    if env["layout"] == "versioned":
        parts.append("-" + "_".join(env["BPL_VERSION"].split(".")))
    return "".join(parts)
def prepare_build_dir(env):
    """Derive the build directory and library-suffix variables from `env`."""
    vars = {}
    env["boost_suffix"] = boost_suffix
    build_dir="bin.SCons"
    # FIXME: Support 'toolchain' variable properly.
    # For now, we simply check whether $CXX refers to clang or gcc.
    if "gcc" in env["TOOLS"]:
        if env['CXX'] in ('clang', 'clang++'):
            build_dir+="/clang-%s"%env["CXXVERSION"]
        else: # assume g++
            build_dir+="/gcc-%s"%env["CXXVERSION"]
        # Only fall back to the defaults when no CXXFLAGS were given.
        default_cxxflags = ['-ftemplate-depth-128', '-Wall', '-g', '-O2']
        vars['CXXFLAGS'] = env.get('CXXFLAGS', default_cxxflags)
    elif "msvc" in env["TOOLS"]:
        build_dir+="/msvc-%s"%env["MSVS_VERSION"]
    vars['BOOST_BUILD_DIR'] = build_dir
    # Evaluated lazily by SCons via the boost_suffix callable stored above.
    vars['BOOST_SUFFIX'] = "${boost_suffix(__env__)}"
    env.Replace(**vars)
    return build_dir
def variants(env):
    """Yield a cloned environment per (variant, link, threading) combination."""
    env.Prepend(CPPPATH = "#/include", CPPDEFINES = ["BOOST_ALL_NO_LIB=1"])
    set_property(env, architecture = env['TARGET_ARCH'])
    for variant in env["variant"]:
        e = env.Clone()
        e["current_variant"] = variant
        set_property(env, profile = False)
        if variant == "release":
            set_property(e, optimize = "speed", debug = False)
        elif variant == "debug":
            set_property(e, optimize = "no", debug = True)
        elif variant == "profile":
            set_property(e, optimize = "speed", profile = True, debug = True)
        for linking in env["link"]:
            e["linking"] = linking
            if linking == "dynamic":
                e["LINK_DYNAMIC"] = True
            else:
                e["LINK_DYNAMIC"] = False
            for threading in e["threading"]:
                e["current_threading"] = threading
                set_property(e, threading = threading)
                yield e
| agpl-3.0 |
JackDanger/sentry | tests/sentry/web/frontend/test_auth_login.py | 6 | 4389 | from __future__ import absolute_import
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.testutils import TestCase
from sentry.models import User
# TODO(dcramer): need tests for SSO behavior and single org behavior
class AuthLoginTest(TestCase):
    @fixture
    def path(self):
        # Login endpoint under test.
        return reverse('sentry-login')
def test_renders_correct_template(self):
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed('sentry/login.html')
def test_renders_session_expire_message(self):
self.client.cookies['session_expired'] = '1'
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/login.html')
assert len(resp.context['messages']) == 1
def test_login_invalid_password(self):
# load it once for test cookie
self.client.get(self.path)
resp = self.client.post(self.path, {
'username': self.user.username,
'password': 'bizbar',
'op': 'login',
})
assert resp.status_code == 200
assert resp.context['login_form'].errors['__all__'] == [
u'Please enter a correct username and password. Note that both fields may be case-sensitive.'
]
def test_login_valid_credentials(self):
# load it once for test cookie
self.client.get(self.path)
resp = self.client.post(self.path, {
'username': self.user.username,
'password': 'admin',
'op': 'login',
})
assert resp.status_code == 302
def test_registration_disabled(self):
with self.feature('auth:register', False):
resp = self.client.get(self.path)
assert resp.context['register_form'] is None
def test_registration_valid(self):
with self.feature('auth:register'):
resp = self.client.post(self.path, {
'username': 'test-a-really-long-email-address@example.com',
'password': 'foobar',
'op': 'register',
})
assert resp.status_code == 302
user = User.objects.get(username='test-a-really-long-email-address@example.com')
assert user.email == 'test-a-really-long-email-address@example.com'
assert user.check_password('foobar')
def test_register_renders_correct_template(self):
register_path = reverse('sentry-register')
resp = self.client.get(register_path)
assert resp.status_code == 200
assert resp.context['op'] == 'register'
self.assertTemplateUsed('sentry/login.html')
def test_already_logged_in(self):
self.login_as(self.user)
with self.feature('organizations:create'):
resp = self.client.get(self.path)
assert resp.status_code == 302
assert resp['Location'] == 'http://testserver' + reverse('sentry-create-organization')
def test_register_prefills_invite_email(self):
self.session['invite_email'] = 'foo@example.com'
self.session['can_register'] = True
self.save_session()
register_path = reverse('sentry-register')
resp = self.client.get(register_path)
assert resp.status_code == 200
assert resp.context['op'] == 'register'
assert resp.context['register_form'].initial['username'] == 'foo@example.com'
self.assertTemplateUsed('sentry/login.html')
def test_redirects_to_relative_next_url(self):
next = '/welcome'
self.client.get(self.path + '?next=' + next)
resp = self.client.post(self.path, {
'username': self.user.username,
'password': 'admin',
'op': 'login',
})
assert resp.status_code == 302
assert resp.get('Location', '').endswith(next)
def test_doesnt_redirect_to_external_next_url(self):
next = "http://example.com"
self.client.get(self.path + '?next=' + urlquote(next))
resp = self.client.post(self.path, {
'username': self.user.username,
'password': 'admin',
'op': 'login',
})
assert resp.status_code == 302
assert next not in resp['Location']
assert resp['Location'] == 'http://testserver/auth/login/'
| bsd-3-clause |
huytd/dejavu | dejavu/fingerprint.py | 1 | 6020 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0  # index of the frequency component in a peak tuple
IDX_TIME_J = 1  # index of the time component in a peak tuple
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100  # Hz
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096  # samples per FFT window
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
# Units are spectrogram time bins (columns), not seconds.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
# NOTE(review): the value is used to slice hex *characters*, not bits -- see
# generate_hashes(); confirm the intended unit.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
                wsize=DEFAULT_WINDOW_SIZE,
                wratio=DEFAULT_OVERLAP_RATIO,
                fan_value=DEFAULT_FAN_VALUE,
                amp_min=DEFAULT_AMP_MIN):
    """Fingerprint one audio channel.

    Computes a log-power spectrogram via FFT, locates its local maxima,
    and returns locality-sensitive hashes built from pairs of peaks.
    """
    # Power spectrogram of the channel; specgram() returns (spectrum, freqs, t),
    # we only need the spectrum.
    specgram = mlab.specgram(
        channel_samples,
        NFFT=wsize,
        Fs=Fs,
        window=mlab.window_hanning,
        noverlap=int(wsize * wratio))[0]

    # Log transform, since specgram() returns a linear-power array.
    specgram = 10 * np.log10(specgram)
    specgram[specgram == -np.inf] = 0  # replace infs (log of 0) with zeros

    # Locate spectral peaks, then hash neighboring pairs of them.
    peaks = get_2D_peaks(specgram, plot=False, amp_min=amp_min)
    return generate_hashes(peaks, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    """Return (frequency_idx, time_idx) pairs for spectral peaks in arr2D.

    A cell counts as a peak when it is the maximum of its
    PEAK_NEIGHBORHOOD_SIZE neighborhood and its amplitude exceeds amp_min.

    :param arr2D: 2D log-spectrogram array (freq x time)
    :param plot: if True, show a scatter of the detected peaks
    :param amp_min: minimum amplitude for a local maximum to qualify
    :returns: list of (frequency_idx, time_idx) tuples
    """
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks.
    # BUG FIX: boolean-array subtraction (`local_max - eroded_background`)
    # is rejected by modern NumPy; the intended set difference is AND-NOT.
    detected_peaks = local_max & ~eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks by minimum amplitude
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    if plot:
        # scatter of the peaks
        fig, ax = plt.subplots()
        ax.imshow(arr2D)
        ax.scatter(time_idx, frequency_idx)
        ax.set_xlabel('Time')
        ax.set_ylabel('Frequency')
        ax.set_title("Spectrogram")
        plt.gca().invert_yaxis()
        plt.show()

    # list() keeps the result indexable/sortable under Python 3, where zip()
    # returns an iterator (no-op on Python 2).
    return list(zip(frequency_idx, time_idx))
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
    """Yield (sha1_prefix, time_offset) fingerprints for pairs of nearby peaks.

    Hash list structure:
       sha1_hash[0:20] time_offset
    [(e05b341a9b77a51fd26, 32), ... ]

    :param peaks: iterable of (freq, time) peak tuples
    :param fan_value: how many forward neighbors each peak is paired with
    """
    # ROBUSTNESS: materialize so len()/sort()/indexing work even when `peaks`
    # is an iterator (e.g. a Python 3 zip object).
    peaks = list(peaks)
    fingerprinted = set()  # to avoid rehashing same pairs

    if PEAK_SORT:
        peaks.sort(key=itemgetter(1))

    for i in range(len(peaks)):
        for j in range(1, fan_value):
            if (i + j) < len(peaks) and (i, i + j) not in fingerprinted:
                freq1 = peaks[i][IDX_FREQ_I]
                freq2 = peaks[i + j][IDX_FREQ_I]
                t1 = peaks[i][IDX_TIME_J]
                t2 = peaks[i + j][IDX_TIME_J]
                t_delta = t2 - t1

                if MIN_HASH_TIME_DELTA <= t_delta <= MAX_HASH_TIME_DELTA:
                    # BUG FIX: sha1() requires bytes on Python 3; encoding
                    # this ASCII-only string is a no-op on Python 2.
                    h = hashlib.sha1(
                        ("%s|%s|%s" % (str(freq1), str(freq2),
                                       str(t_delta))).encode('utf-8'))
                    yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)

                # ensure we don't repeat hashing
                fingerprinted.add((i, i + j))
| mit |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/search/src/search/views.py | 4 | 19985 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.models import Document2, Document
from libsolr.api import SolrApi
from indexer.management.commands import indexer_setup
from search.api import _guess_gap, _zoom_range_facet, _new_range_facet
from search.conf import SOLR_URL, LATEST
from search.data_export import download as export_download
from search.decorators import allow_owner_only, allow_viewer_only
from search.management.commands import search_setup
from search.models import Collection2, augment_solr_response, augment_solr_exception, pairwise2
from search.search_controller import SearchController
LOG = logging.getLogger(__name__)
def index(request):
  """Render a saved dashboard identified by the ?collection=<id> parameter.

  Falls back to the dashboard admin page when no saved dashboards exist or
  no collection id was supplied.
  """
  hue_collections = SearchController(request.user).get_search_collections()
  collection_id = request.GET.get('collection')
  if not hue_collections or not collection_id:
    return admin_collections(request, True)
  try:
    collection_doc = Document2.objects.get(id=collection_id)
    # Raises if the user lacks read access; converted to PopupException below.
    collection_doc.doc.get().can_read_or_exception(request.user)
    collection = Collection2(request.user, document=collection_doc)
  except Exception, e:
    raise PopupException(e, title=_("Dashboard does not exist or you don't have the permission to access it."))
  # Empty initial query: one blank q, no filter queries, offset 0.
  query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
  return render('search.mako', request, {
    'collection': collection,
    'query': query,
    'initial': json.dumps({'collections': [], 'layout': [], 'is_latest': LATEST.get()}),
    'is_owner': collection_doc.doc.get().can_write(request.user)
  })
def new_search(request):
  """Render an unsaved dashboard seeded with the first available Solr index."""
  collections = SearchController(request.user).get_all_indexes()
  if not collections:
    return no_collections(request)
  collection = Collection2(user=request.user, name=collections[0])
  # Empty initial query: one blank q, no filter queries, offset 0.
  query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
  return render('search.mako', request, {
    'collection': collection,
    'query': query,
    'initial': json.dumps({
      'collections': collections,
      # Default layout: facet column (span2) + results column (span10)
      # holding a filter bar and a grid-results widget.
      'layout': [
        {"size":2,"rows":[{"widgets":[]}],"drops":["temp"],"klass":"card card-home card-column span2"},
        {"size":10,"rows":[{"widgets":[
            {"size":12,"name":"Filter Bar","widgetType":"filter-widget", "id":"99923aef-b233-9420-96c6-15d48293532b",
             "properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]},
          {"widgets":[
            {"size":12,"name":"Grid Results","widgetType":"resultset-widget", "id":"14023aef-b233-9420-96c6-15d48293532b",
             "properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]}],
        "drops":["temp"],"klass":"card card-home card-column span10"},
      ],
      'is_latest': LATEST.get()
    }),
    'is_owner': True
  })
def browse(request, name):
  """Render an auto-loading, grid-only dashboard for the index `name`."""
  collections = SearchController(request.user).get_all_indexes()
  if not collections:
    return no_collections(request)
  collection = Collection2(user=request.user, name=name)
  # Empty initial query: one blank q, no filter queries, offset 0.
  query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
  return render('search.mako', request, {
    'collection': collection,
    'query': query,
    'initial': json.dumps({
      # autoLoad makes the page issue the first search automatically.
      'autoLoad': True,
      'collections': collections,
      'layout': [
        {"size":12,"rows":[{"widgets":[
            {"size":12,"name":"Grid Results","id":"52f07188-f30f-1296-2450-f77e02e1a5c0","widgetType":"resultset-widget",
             "properties":{},"offset":0,"isLoading":True,"klass":"card card-widget span12"}]}],
        "drops":["temp"],"klass":"card card-home card-column span10"}
      ],
      'is_latest': LATEST.get()
    }),
    'is_owner': True
  })
@allow_viewer_only
def search(request):
response = {}
collection = json.loads(request.POST.get('collection', '{}'))
query = json.loads(request.POST.get('query', '{}'))
query['download'] = 'download' in request.POST
if collection:
try:
response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
response = augment_solr_response(response, collection, query)
except RestException, e:
try:
response['error'] = json.loads(e.message)['error']['msg']
except:
LOG.exception('failed to parse json response')
response['error'] = force_unicode(e)
except Exception, e:
raise PopupException(e, title=_('Error while accessing Solr'))
response['error'] = force_unicode(e)
else:
response['error'] = _('There is no collection to search.')
if 'error' in response:
augment_solr_exception(response, collection)
return JsonResponse(response)
@allow_owner_only
def save(request):
  """Persist a dashboard (collection definition + layout) as a Document2.

  Creates the Document2/Document pair on first save, then updates both the
  JSON payload and the duplicated name/description metadata.
  """
  response = {'status': -1}

  collection = json.loads(request.POST.get('collection', '{}'))
  layout = json.loads(request.POST.get('layout', '{}'))

  # Escape user-supplied extra code before storing it.
  collection['template']['extracode'] = escape(collection['template']['extracode'])

  if collection:
    if collection['id']:
      dashboard_doc = Document2.objects.get(id=collection['id'])
    else:
      dashboard_doc = Document2.objects.create(name=collection['name'], uuid=collection['uuid'], type='search-dashboard', owner=request.user, description=collection['label'])
      Document.objects.link(dashboard_doc, owner=request.user, name=collection['name'], description=collection['label'], extra='search-dashboard')

    dashboard_doc.update_data({
      'collection': collection,
      'layout': layout
    })
    # Keep the legacy Document (doc1) metadata in sync with Document2.
    dashboard_doc1 = dashboard_doc.doc.get()
    dashboard_doc.name = dashboard_doc1.name = collection['label']
    dashboard_doc.description = dashboard_doc1.description = collection['description']
    dashboard_doc.save()
    dashboard_doc1.save()

    response['status'] = 0
    response['id'] = dashboard_doc.id
    response['message'] = _('Page saved !')
  else:
    response['message'] = _('There is no collection to search.')

  return JsonResponse(response)
@allow_owner_only
def save_definition(request):
  """Persist a search definition attached to a collection.

  NOTE(review): this view looks broken/dead: `Collection` is not imported in
  this module (only `Collection2` is), so the lookup below would raise a
  NameError at runtime -- confirm the intended model before relying on it.
  """
  response = {'status': -1}

  collection = json.loads(request.POST.get('collection', '{}')) # id
  query = json.loads(request.POST.get('query', '{}'))
  # NOTE(review): placeholder values; 'name' is assigned twice -- likely WIP.
  query['name'] = 'My def'
  query['uuid'] = 'uuid'
  query['name'] = 'My def'

  if collection and query:
    collection = Collection.objects.get(id=collection['id'])

    if query['id']:
      # NOTE(review): fetches by collection['id'] although the condition
      # tests query['id'] -- looks like a copy/paste bug; confirm.
      definition_doc = Document2.objects.get(id=collection['id'])
    else:
      definition_doc = Document2.objects.create(name=query['name'], uuid=query['uuid'], type='search-definition', owner=request.user, dependencies=[collection])
      #Document.objects.link(coordinator_doc, owner=coordinator_doc.owner, name=coordinator_doc.name, description=coordinator_doc.description, extra='coordinator2')

    definition_doc.update_data(query)
    definition_doc.save()

    response['status'] = 0
    response['id'] = definition_doc.id
    response['message'] = _('Definition saved !')
  else:
    response['message'] = _('There is no collection to search.')

  return JsonResponse(response)
@allow_viewer_only
def download(request):
try:
file_format = 'csv' if 'csv' in request.POST else 'xls' if 'xls' in request.POST else 'json'
response = search(request)
if file_format == 'json':
docs = json.loads(response.content)['response']['docs']
resp = JsonResponse(docs, safe=False)
resp['Content-Disposition'] = 'attachment; filename=%s.%s' % ('query_result', file_format)
return resp
else:
collection = json.loads(request.POST.get('collection', '{}'))
return export_download(json.loads(response.content), file_format, collection)
except Exception, e:
raise PopupException(_("Could not download search results: %s") % e)
def no_collections(request):
  """Landing page shown when no Solr collections/indexes are available."""
  return render('no_collections.mako', request, {})
def admin_collections(request, is_redirect=False):
  """List saved dashboards; serves JSON when ?format=json is requested.

  :param is_redirect: True when rendered as a fallback from `index`.
  """
  existing_hue_collections = SearchController(request.user).get_search_collections()

  if request.GET.get('format') == 'json':
    collections = []
    for collection in existing_hue_collections:
      massaged_collection = collection.to_dict()
      # Expose write permission so the UI can enable owner-only actions.
      massaged_collection['isOwner'] = collection.doc.get().can_write(request.user)
      collections.append(massaged_collection)
    return JsonResponse(collections, safe=False)

  return render('admin_collections.mako', request, {
    'existing_hue_collections': existing_hue_collections,
    'is_redirect': is_redirect
  })
def admin_collection_delete(request):
  """Delete the posted dashboards (POST only); returns {'result': ...} JSON."""
  if request.method != 'POST':
    raise PopupException(_('POST request required.'))

  collections = json.loads(request.POST.get('collections'))
  ids = [collection['id'] for collection in collections]
  outcome = SearchController(request.user).delete_collections(ids)
  return JsonResponse({'result': outcome})
def admin_collection_copy(request):
  """Copy the posted dashboards (POST only); returns {'result': ...} JSON."""
  if request.method != 'POST':
    raise PopupException(_('POST request required.'))

  collections = json.loads(request.POST.get('collections'))
  ids = [collection['id'] for collection in collections]
  outcome = SearchController(request.user).copy_collections(ids)
  return JsonResponse({'result': outcome})
def query_suggest(request, collection_id, query=""):
  """Return Solr query suggestions for a collection.

  NOTE(review): `Collection` is not imported in this module (only
  `Collection2` is), so this view raises NameError when hit -- confirm the
  intended model/import before use.
  """
  hue_collection = Collection.objects.get(id=collection_id)
  result = {'status': -1, 'message': 'Error'}

  solr_query = {}
  solr_query['collection'] = hue_collection.name
  solr_query['q'] = query

  try:
    response = SolrApi(SOLR_URL.get(), request.user).suggest(solr_query, hue_collection)
    result['message'] = response
    result['status'] = 0
  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
def index_fields_dynamic(request):
  """Return the dynamic fields of a Solr index via the Luke handler."""
  result = {'status': -1, 'message': 'Error'}

  try:
    name = request.POST['name']

    dynamic_fields = SolrApi(SOLR_URL.get(), request.user).luke(name)

    result['message'] = ''
    # Keep only fields generated from a dynamic-field template
    # ('dynamicBase' marks them in the Luke output).
    result['fields'] = [Collection2._make_field(name, properties)
                        for name, properties in dynamic_fields['fields'].iteritems() if 'dynamicBase' in properties]
    result['gridlayout_header_fields'] = [Collection2._make_gridlayout_header_field({'name': name}, True)
                                          for name, properties in dynamic_fields['fields'].iteritems() if 'dynamicBase' in properties]
    result['status'] = 0
  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
@allow_viewer_only
def get_document(request):
  """Fetch a single Solr document by id for the posted collection.

  Returns status 0 with the doc, or status 1 (soft error) when the id is
  missing or Solr returns nothing.
  """
  result = {'status': -1, 'message': 'Error'}

  try:
    collection = json.loads(request.POST.get('collection', '{}'))
    doc_id = request.POST.get('id')

    if doc_id:
      result['doc'] = SolrApi(SOLR_URL.get(), request.user).get(collection['name'], doc_id)
      if result['doc']['doc']:
        result['status'] = 0
        result['message'] = ''
      else:
        result['status'] = 1
        result['message'] = _('No document was returned by Solr.')
    else:
      result['message'] = _('This document does not have any index id.')
      result['status'] = 1

  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
@allow_viewer_only
def get_stats(request):
  """Return Solr stats for one field of the posted collection/query.

  Unsupported field types are reported as a soft error (status 1).
  """
  result = {'status': -1, 'message': 'Error'}

  try:
    collection = json.loads(request.POST.get('collection', '{}'))
    query = json.loads(request.POST.get('query', '{}'))
    analysis = json.loads(request.POST.get('analysis', '{}'))

    field = analysis['name']
    facet = analysis['stats']['facet']

    result['stats'] = SolrApi(SOLR_URL.get(), request.user).stats(collection['name'], [field], query, facet)
    result['status'] = 0
    result['message'] = ''

  except Exception, e:
    result['message'] = force_unicode(e)
    # Solr signals unsupported field types in the error text.
    if 'not currently supported' in result['message']:
      result['status'] = 1
      result['message'] = _('This field does not support stats')

  return JsonResponse(result)
@allow_viewer_only
def get_terms(request):
  """Return the top terms of one field, optionally filtered by prefix."""
  result = {'status': -1, 'message': 'Error'}

  try:
    collection = json.loads(request.POST.get('collection', '{}'))
    analysis = json.loads(request.POST.get('analysis', '{}'))

    field = analysis['name']
    properties = {
      'terms.limit': 25,
      'terms.prefix': analysis['terms']['prefix']
      # lower
      # limit
      # mincount
      # maxcount
    }

    result['terms'] = SolrApi(SOLR_URL.get(), request.user).terms(collection['name'], field, properties)
    # Flatten the Solr [term, count, term, count, ...] list into pairs.
    result['terms'] = pairwise2(field, [], result['terms']['terms'][field])
    result['status'] = 0
    result['message'] = ''

  except Exception, e:
    result['message'] = force_unicode(e)
    # Solr signals unsupported field types in the error text.
    if 'not currently supported' in result['message']:
      result['status'] = 1
      result['message'] = _('This field does not support stats')

  return JsonResponse(result)
@allow_viewer_only
def get_timeline(request):
  """Return one histogram series for a sub-query of a timeline widget.

  Depending on `multiQ`, the series is scoped to a whole query, a single
  range bucket, or a single field:value filter.
  """
  result = {'status': -1, 'message': 'Error'}

  try:
    collection = json.loads(request.POST.get('collection', '{}'))
    query = json.loads(request.POST.get('query', '{}'))
    facet = json.loads(request.POST.get('facet', '{}'))
    qdata = json.loads(request.POST.get('qdata', '{}'))
    multiQ = request.POST.get('multiQ', 'query')

    if multiQ == 'query':
      label = qdata['q']
      query['qs'] = [qdata]
    elif facet['type'] == 'range':
      # qdata identifies the range bucket by its 'from' bound.
      _prop = filter(lambda prop: prop['from'] == qdata, facet['properties'])[0]
      label = '%(from)s - %(to)s ' % _prop
      facet_id = facet['id']
      # Only care about our current field:value filter
      for fq in query['fqs']:
        if fq['id'] == facet_id:
          fq['properties'] = [_prop]
    else:
      label = qdata
      facet_id = facet['id']
      # Only care about our current field:value filter
      for fq in query['fqs']:
        if fq['id'] == facet_id:
          fq['filter'] = [{'value': qdata, 'exclude': False}]

    # Remove other facets from collection for speed
    collection['facets'] = filter(lambda f: f['widgetType'] == 'histogram-widget', collection['facets'])

    response = SolrApi(SOLR_URL.get(), request.user).query(collection, query)
    response = augment_solr_response(response, collection, query)

    label += ' (%s) ' % response['response']['numFound']

    result['series'] = {'label': label, 'counts': response['normalized_facets'][0]['counts']}
    result['status'] = 0
    result['message'] = ''
  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
@allow_viewer_only
def new_facet(request):
result = {'status': -1, 'message': 'Error'}
try:
collection = json.loads(request.POST.get('collection', '{}'))
facet_id = request.POST['id']
facet_label = request.POST['label']
facet_field = request.POST['field']
widget_type = request.POST['widget_type']
result['message'] = ''
result['facet'] = _create_facet(collection, request.user, facet_id, facet_label, facet_field, widget_type)
result['status'] = 0
except Exception, e:
result['message'] = force_unicode(e)
return JsonResponse(result)
def _create_facet(collection, user, facet_id, facet_label, facet_field, widget_type):
properties = {
'sort': 'desc',
'canRange': False,
'stacked': False,
'limit': 10,
'mincount': 0,
'isDate': False,
'aggregate': 'unique'
}
if widget_type in ('tree-widget', 'heatmap-widget', 'map-widget'):
facet_type = 'pivot'
elif widget_type == 'hit-widget':
facet_type = 'function'
else:
solr_api = SolrApi(SOLR_URL.get(), user)
range_properties = _new_range_facet(solr_api, collection, facet_field, widget_type)
if range_properties:
facet_type = 'range'
properties.update(range_properties)
properties['initial_gap'] = properties['gap']
properties['initial_start'] = properties['start']
properties['initial_end'] = properties['end']
else:
facet_type = 'field'
if widget_type == 'bucket-widget':
facet_type = 'nested'
properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 10, 'aggregate': 'count'}
properties['facets'] = []
properties['scope'] = 'stack'
properties['timelineChartType'] = 'bar'
if widget_type in ('tree-widget', 'heatmap-widget', 'map-widget'):
properties['mincount'] = 1
properties['facets'] = []
properties['stacked'] = True
properties['facets_form'] = {'field': '', 'mincount': 1, 'limit': 5}
if widget_type == 'map-widget':
properties['scope'] = 'world'
properties['limit'] = 100
else:
properties['scope'] = 'stack' if widget_type == 'heatmap-widget' else 'tree'
return {
'id': facet_id,
'label': facet_label,
'field': facet_field,
'type': facet_type,
'widgetType': widget_type,
'properties': properties
}
@allow_viewer_only
def get_range_facet(request):
  """Recompute range-facet properties on select (guess a gap) or zoom-out."""
  result = {'status': -1, 'message': ''}

  try:
    collection = json.loads(request.POST.get('collection', '{}'))
    facet = json.loads(request.POST.get('facet', '{}'))
    action = request.POST.get('action', 'select')

    solr_api = SolrApi(SOLR_URL.get(), request.user)

    if action == 'select':
      properties = _guess_gap(solr_api, collection, facet, facet['properties']['start'], facet['properties']['end'])
    else:
      properties = _zoom_range_facet(solr_api, collection, facet) # Zoom out

    result['properties'] = properties
    result['status'] = 0

  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
def get_collection(request):
  """Return the JSON definition of one index/collection by name."""
  result = {'status': -1, 'message': ''}

  try:
    name = request.POST['name']

    collection = Collection2(request.user, name=name)
    collection_json = collection.get_json(request.user)

    result['collection'] = json.loads(collection_json)
    result['status'] = 0

  except Exception, e:
    result['message'] = force_unicode(e)

  return JsonResponse(result)
def get_collections(request):
  """List the available Solr indexes (optionally all, not just visible ones)."""
  result = {'status': -1, 'message': ''}

  try:
    show_all = json.loads(request.POST.get('show_all'))

    result['collection'] = SearchController(request.user).get_all_indexes(show_all=show_all)
    result['status'] = 0

  except Exception, e:
    # Sentry-less fallback: if listing is forbidden, at least return the
    # collection the caller already knows about.
    if 'does not have privileges' in str(e):
      result['status'] = 0
      result['collection'] = [json.loads(request.POST.get('collection'))['name']]
    else:
      result['message'] = force_unicode(e)

  return JsonResponse(result)
def install_examples(request):
result = {'status': -1, 'message': ''}
if not request.user.is_superuser:
return PopupException(_("You must be a superuser."))
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
search_setup.Command().handle_noargs()
indexer_setup.Command().handle_noargs()
result['status'] = 0
except Exception, e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
| gpl-2.0 |
bswartz/cinder | cinder/volume/drivers/emc/emc_vmax_provision_v3.py | 1 | 35993 | # Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
# Numeric codes passed to the EMC SMI-S provider.
# NOTE(review): meanings below are per the SMI-S/EMC provider API; only
# THINPROVISIONING's use (ElementType in create_volume_from_sg) is visible
# in this module -- confirm the rest against the provider documentation.
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'  # ECOM CIM namespace
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5  # ElementType for a thin-provisioned volume
INFO_SRC_V3 = 3
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
class EMCVMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
    def __init__(self, prtcl):
        """Initialize with the driver protocol.

        :param prtcl: protocol identifier, forwarded to the utils helper
            (NOTE(review): presumably 'iSCSI'/'FC' -- confirm with callers)
        """
        self.protocol = prtcl
        self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
    def delete_volume_from_pool(
            self, conn, storageConfigservice, volumeInstanceName, volumeName,
            extraSpecs):
        """Given the volume instance remove it from the pool.

        :param conn: connection to the ecom server
        :param storageConfigservice: volume created from job
        :param volumeInstanceName: the volume instance name (or a list of
            instance names for a bulk delete)
        :param volumeName: the volume name (String)
        :param extraSpecs: additional info
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        # A list of instance names means a bulk delete of several volumes.
        if isinstance(volumeInstanceName, list):
            theElements = volumeInstanceName
            volumeName = 'Bulk Delete'
        else:
            theElements = [volumeInstanceName]

        rc, job = conn.InvokeMethod(
            'ReturnElementsToStoragePool', storageConfigservice,
            TheElements=theElements)

        if rc != 0:
            # Non-zero rc: the provider went asynchronous -- wait for the job.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Delete Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
                  "%(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})

        return rc
    def create_volume_from_sg(
            self, conn, storageConfigService, volumeName,
            sgInstanceName, volumeSize, extraSpecs):
        """Create the volume and associate it with a storage group.

        We use EMCCollections parameter to supply a Device Masking Group
        to contain a newly created storage volume.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service
        :param volumeName: the volume name (String)
        :param sgInstanceName: the storage group instance name
            associated with an SLO
        :param volumeSize: volume size (String)
        :param extraSpecs: additional info
        :returns: dict -- volumeDict - the volume dict
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        try:
            storageGroupInstance = conn.GetInstance(sgInstanceName)
        except Exception:
            exceptionMessage = (_(
                "Unable to get the name of the storage group"))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)

        # Serialize creates per storage group by locking on the SG's
        # ElementName: concurrent modifications of the same SG can fail
        # on the array side.
        @lockutils.synchronized(storageGroupInstance['ElementName'],
                                "emc-sg-", True)
        def do_create_volume_from_sg():
            startTime = time.time()

            rc, job = conn.InvokeMethod(
                'CreateOrModifyElementFromStoragePool',
                storageConfigService, ElementName=volumeName,
                EMCCollections=[sgInstanceName],
                ElementType=self.utils.get_num(THINPROVISIONING, '16'),
                Size=self.utils.get_num(volumeSize, '64'))

            LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
                      {'volumename': volumeName,
                       'rc': rc})

            if rc != 0:
                # Non-zero rc: asynchronous call -- wait for the job.
                rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                                 extraSpecs)
                if rc != 0:
                    exceptionMessage = (_(
                        "Error Create Volume: %(volumeName)s. "
                        "Return code: %(rc)lu. Error: %(error)s.")
                        % {'volumeName': volumeName,
                           'rc': rc,
                           'error': errordesc})
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)

            LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(startTime,
                                                          time.time())})

            # Find the newly created volume.
            volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
            return volumeDict, rc
        return do_create_volume_from_sg()
def _find_new_storage_group(
self, conn, maskingGroupDict, storageGroupName):
"""After creating an new storage group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
    def get_volume_dict_from_job(self, conn, jobInstance):
        """Given the jobInstance determine the volume Instance.

        :param conn: the ecom connection
        :param jobInstance: the instance of a job
        :returns: dict -- volumeDict - serialized description of the volume
        :raises: VolumeBackendAPIException -- when the job has no
            associated storage volume
        """
        associators = conn.Associators(
            jobInstance,
            ResultClass='EMC_StorageVolume')
        if len(associators) > 0:
            # Serialize the first associated volume's CIM path into a dict.
            return self.create_volume_dict(associators[0].path)
        else:
            exceptionMessage = (_(
                "Unable to get storage volume from job."))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return associators[0]
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_volume_dict(self, volumeInstanceName):
"""Create volume dictionary
:param volumeInstanceName: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
volpath = volumeInstanceName
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
    def create_element_replica(
            self, conn, repServiceInstanceName,
            cloneName, syncType, sourceInstance, extraSpecs,
            targetInstance=None, rsdInstance=None):
        """Make SMI-S call to create replica for source element.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: replication service
        :param cloneName: clone volume name
        :param syncType: 7=snapshot, 8=clone
        :param sourceInstance: source volume instance
        :param extraSpecs: additional info
        :param targetInstance: Target volume instance. Default None
        :param rsdInstance: replication settingdata instance. Default None
        :returns: int -- rc - return code
        :returns: job - job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        LOG.debug("Create replica: %(clone)s "
                  "syncType: %(syncType)s Source: %(source)s.",
                  {'clone': cloneName,
                   'syncType': syncType,
                   'source': sourceInstance.path})
        storageSystemName = sourceInstance['SystemName']
        # Locate the default V3 storage group for this pool/SLO/workload;
        # the replica is associated with it via Collections on creation.
        __, __, sgInstanceName = (
            self.utils.get_v3_default_sg_instance_name(
                conn, extraSpecs[self.utils.POOL],
                extraSpecs[self.utils.SLO],
                extraSpecs[self.utils.WORKLOAD], storageSystemName))
        try:
            storageGroupInstance = conn.GetInstance(sgInstanceName)
        except Exception:
            exceptionMessage = (_(
                "Unable to get the name of the storage group"))
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
        # Serialize replica creation per storage group so concurrent
        # requests cannot modify the same group at the same time.
        @lockutils.synchronized(storageGroupInstance['ElementName'],
                                "emc-sg-", True)
        def do_create_element_replica():
            if targetInstance is None and rsdInstance is None:
                rc, job = conn.InvokeMethod(
                    'CreateElementReplica', repServiceInstanceName,
                    ElementName=cloneName,
                    SyncType=self.utils.get_num(syncType, '16'),
                    SourceElement=sourceInstance.path,
                    Collections=[sgInstanceName])
            else:
                # An explicit target and/or replication setting data
                # requires the extended invocation form.
                rc, job = self._create_element_replica_extra_params(
                    conn, repServiceInstanceName, cloneName, syncType,
                    sourceInstance, targetInstance, rsdInstance,
                    sgInstanceName)
            if rc != 0:
                # A non-zero code may indicate an asynchronous job; wait
                # for it to complete and re-check the final status.
                rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                                 extraSpecs)
                if rc != 0:
                    exceptionMessage = (_(
                        "Error Create Cloned Volume: %(cloneName)s "
                        "Return code: %(rc)lu. Error: %(error)s.")
                        % {'cloneName': cloneName,
                           'rc': rc,
                           'error': errordesc})
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)
            LOG.debug("InvokeMethod CreateElementReplica "
                      "took: %(delta)s H:MM:SS.",
                      {'delta': self.utils.get_time_delta(startTime,
                                                          time.time())})
            return rc, job
        return do_create_element_replica()
    def _create_element_replica_extra_params(
            self, conn, repServiceInstanceName, cloneName, syncType,
            sourceInstance, targetInstance, rsdInstance, sgInstanceName):
        """CreateElementReplica using extra parameters.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: replication service
        :param cloneName: clone volume name
        :param syncType: 7=snapshot, 8=clone
        :param sourceInstance: source volume instance
        :param targetInstance: Target volume instance. Default None
        :param rsdInstance: replication settingdata instance. Default None
        :param sgInstanceName: pool instance name
        :returns: int -- rc - return code
        :returns: job - job object of the replica creation operation
        """
        # NOTE(review): callers must supply at least one of targetInstance
        # or rsdInstance; if both are None no branch runs and the final
        # return raises UnboundLocalError -- confirm callers uphold this.
        syncType = self.utils.get_num(syncType, '16')
        if targetInstance and rsdInstance:
            # Both an explicit target and replication setting data.
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName,
                SyncType=syncType,
                SourceElement=sourceInstance.path,
                TargetElement=targetInstance.path,
                ReplicationSettingData=rsdInstance)
        elif targetInstance:
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName,
                SyncType=syncType,
                SourceElement=sourceInstance.path,
                TargetElement=targetInstance.path)
        elif rsdInstance:
            # No explicit target: associate with the default storage group.
            rc, job = conn.InvokeMethod(
                'CreateElementReplica', repServiceInstanceName,
                ElementName=cloneName,
                SyncType=syncType,
                SourceElement=sourceInstance.path,
                ReplicationSettingData=rsdInstance,
                Collections=[sgInstanceName])
        return rc, job
    def break_replication_relationship(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Deletes the relationship between the clone/snap and source volume.

        Makes an SMI-S call to break clone relationship between the clone
        volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to break replication relationship if True
        :returns: rc - return code
        :returns: job - job object of the replica creation operation
        """
        LOG.debug("Break replication relationship: %(sv)s "
                  "operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        # Delegates to the shared ModifyReplicaSynchronization helper.
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
groupName, srp, slo, workload, extraSpecs):
"""Create the volume in the specified pool.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extraSpecs: additional info
:returns: storageGroupInstanceName - storage group instance name
"""
startTime = time.time()
@lockutils.synchronized(groupName, "emc-sg-", True)
def do_create_storage_group_v3():
if slo and workload:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'),
EMCSRP=srp,
EMCSLO=slo,
EMCWorkload=workload)
else:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
{'groupName': groupName,
'rc': rc,
'error': errordesc})
raise
LOG.debug("InvokeMethod CreateGroup "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
return do_create_storage_group_v3()
def get_storage_pool_capability(self, conn, poolInstanceName):
"""Get the pool capability.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:returns: the storage pool capability instance. None if not found
"""
storagePoolCapability = None
associators = (
conn.AssociatorNames(poolInstanceName,
ResultClass='Symm_StoragePoolCapabilities'))
if len(associators) > 0:
storagePoolCapability = associators[0]
return storagePoolCapability
def get_storage_pool_setting(
self, conn, storagePoolCapability, slo, workload):
"""Get the pool setting for pool capability.
:param conn: the connection information to the ecom server
:param storagePoolCapability: the storage pool capability instance
:param slo: the slo string e.g Bronze
:param workload: the workload string e.g DSS_REP
:returns: the storage pool setting instance
"""
foundStoragePoolSetting = None
storagePoolSettings = (
conn.AssociatorNames(storagePoolCapability,
ResultClass='CIM_storageSetting'))
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
matchString = ("%(slo)s:%(workload)s"
% {'slo': slo,
'workload': workload})
if matchString in settingInstanceID:
foundStoragePoolSetting = storagePoolSetting
break
if foundStoragePoolSetting is None:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s and workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': slo,
'workload': workload})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return foundStoragePoolSetting
    def _get_supported_size_range_for_SLO(
            self, conn, storageConfigService,
            srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
        """Gets available performance capacity per SLO.

        :param conn: the connection information to the ecom server
        :param storageConfigService: the storage configuration service instance
        :param srpPoolInstanceName: the SRP storage pool instance
        :param storagePoolSettingInstanceName: the SLO type, e.g Bronze
        :param extraSpecs: additional info
        :returns: dict -- supportedSizeDict - the supported size dict
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        rc, supportedSizeDict = conn.InvokeMethod(
            'GetSupportedSizeRange',
            srpPoolInstanceName,
            ElementType=self.utils.get_num(3, '16'),
            Goal=storagePoolSettingInstanceName)
        if rc != 0:
            # On a non-zero code the out-param is passed on as the job
            # handle to wait for; re-check the final status afterwards.
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, supportedSizeDict, extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Cannot get supported size range for %(sps)s "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sps': storagePoolSettingInstanceName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod GetSupportedSizeRange "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
    def activate_snap_relationship(
            self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
        """Activate snap relationship and start copy operation.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        """
        # Activate the snapVx (module-level ACTIVATESNAPVX operation code).
        operation = ACTIVATESNAPVX
        LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)
    def return_to_resource_pool(self, conn, repServiceInstanceName,
                                syncInstanceName, extraSpecs):
        """Return the snap target resources back to the pool.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the synchronization object
        :param extraSpecs: additional info
        :returns: rc - return code
        :returns: job object of the replica creation operation
        """
        # Deactivate the snapVx (module-level DEACTIVATESNAPVX operation
        # code) so the target resources are released back to the pool.
        operation = DEACTIVATESNAPVX
        LOG.debug("Return snap resource back to pool: "
                  "%(sv)s operation: %(operation)s.",
                  {'sv': syncInstanceName, 'operation': operation})
        return self._modify_replica_synchronization(
            conn, repServiceInstanceName, syncInstanceName, operation,
            extraSpecs)
    def _modify_replica_synchronization(
            self, conn, repServiceInstanceName, syncInstanceName,
            operation, extraSpecs, force=False):
        """Modify the relationship between the clone/snap and source volume.

        Helper function that makes an SMI-S call to break clone relationship
        between the clone volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to modify replication synchronization if True
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        rc, job = conn.InvokeMethod(
            'ModifyReplicaSynchronization', repServiceInstanceName,
            Operation=self.utils.get_num(operation, '16'),
            Synchronization=syncInstanceName,
            Force=force)
        LOG.debug("_modify_replica_synchronization: %(sv)s "
                  "operation: %(operation)s Return code: %(rc)lu.",
                  {'sv': syncInstanceName, 'operation': operation, 'rc': rc})
        if rc != 0:
            # A non-zero code may indicate an asynchronous job; wait for
            # it to complete and re-check the final status.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error modify replica synchronization: %(sv)s "
                    "operation: %(operation)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'sv': syncInstanceName, 'operation': operation,
                       'rc': rc, 'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod ModifyReplicaSynchronization "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        return rc, job
    def create_group_replica(
            self, conn, replicationService,
            srcGroupInstanceName, tgtGroupInstanceName, relationName,
            extraSpecs):
        """Make SMI-S call to create replica for source group.

        :param conn: the connection to the ecom server
        :param replicationService: replication service
        :param srcGroupInstanceName: source group instance name
        :param tgtGroupInstanceName: target group instance name
        :param relationName: replica relationship name
        :param extraSpecs: additional info
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        LOG.debug(
            "Creating CreateGroupReplica V3: "
            "replicationService: %(replicationService)s "
            "RelationName: %(relationName)s "
            "sourceGroup: %(srcGroup)s "
            "targetGroup: %(tgtGroup)s.",
            {'replicationService': replicationService,
             'relationName': relationName,
             'srcGroup': srcGroupInstanceName,
             'tgtGroup': tgtGroupInstanceName})
        # Group replicas are created with the module-level SNAPSYNCTYPE
        # synchronization type.
        rc, job = conn.InvokeMethod(
            'CreateGroupReplica',
            replicationService,
            RelationshipName=relationName,
            SourceGroup=srcGroupInstanceName,
            TargetGroup=tgtGroupInstanceName,
            SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'))
        if rc != 0:
            # A non-zero code may indicate an asynchronous job; wait for
            # it to complete and re-check the final status.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMsg = (_("Error CreateGroupReplica: "
                                  "source: %(source)s target: %(target)s. "
                                  "Return code: %(rc)lu. Error: %(error)s.")
                                % {'source': srcGroupInstanceName,
                                   'target': tgtGroupInstanceName,
                                   'rc': rc,
                                   'error': errordesc})
                LOG.error(exceptionMsg)
                raise exception.VolumeBackendAPIException(data=exceptionMsg)
        return rc, job
    def get_srp_pool_stats(self, conn, arrayInfo):
        """Get the totalManagedSpace, remainingManagedSpace.

        :param conn: the connection to the ecom server
        :param arrayInfo: the array dict
        :returns: totalCapacityGb -- -1 when the pool cannot be read
        :returns: remainingCapacityGb -- -1 when the pool cannot be read
        """
        totalCapacityGb = -1
        remainingCapacityGb = -1
        storageSystemInstanceName = self.utils.find_storageSystem(
            conn, arrayInfo['SerialNumber'])
        srpPoolInstanceNames = conn.AssociatorNames(
            storageSystemInstanceName,
            ResultClass='Symm_SRPStoragePool')
        for srpPoolInstanceName in srpPoolInstanceNames:
            poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)
            if six.text_type(arrayInfo['PoolName']) == (
                    six.text_type(poolnameStr)):
                try:
                    # Check that pool hasn't suddenly been deleted.
                    srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
                    propertiesList = srpPoolInstance.properties.items()
                    for properties in propertiesList:
                        if properties[0] == 'TotalManagedSpace':
                            cimProperties = properties[1]
                            totalManagedSpace = cimProperties.value
                            totalCapacityGb = self.utils.convert_bits_to_gbs(
                                totalManagedSpace)
                        elif properties[0] == 'RemainingManagedSpace':
                            cimProperties = properties[1]
                            remainingManagedSpace = cimProperties.value
                            remainingCapacityGb = (
                                self.utils.convert_bits_to_gbs(
                                    remainingManagedSpace))
                except Exception:
                    # NOTE(review): errors are silently swallowed here and
                    # the -1 sentinels are reported instead -- consider
                    # logging the failure.
                    pass
                remainingSLOCapacityGb = (
                    self._get_remaining_slo_capacity_wlp(
                        conn, srpPoolInstanceName, arrayInfo,
                        storageSystemInstanceName['Name']))
                if remainingSLOCapacityGb != -1:
                    # Prefer the SLO-aware (WLP) figure when available.
                    remainingCapacityGb = remainingSLOCapacityGb
                else:
                    LOG.warning(_LW(
                        "Remaining capacity %(remainingCapacityGb)s "
                        "GBs is determined from SRP pool capacity "
                        "and not the SLO capacity. Performance may "
                        "not be what you expect."),
                        {'remainingCapacityGb': remainingCapacityGb})
        return totalCapacityGb, remainingCapacityGb
    def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
                                        arrayInfo, systemName):
        """Get the remaining SLO capacity.

        This is derived from the WLP portion of Unisphere. Please
        see the SMIProvider doc and the readme doc for details.

        :param conn: the connection to the ecom server
        :param srpPoolInstanceName: SRP instance name
        :param arrayInfo: the array dict
        :param systemName: the system name
        :returns: remainingCapacityGb -- -1 when the WLP data is unavailable
        """
        remainingCapacityGb = -1
        storageConfigService = (
            self.utils.find_storage_configuration_service(
                conn, systemName))
        # extraSpecs is deliberately passed as None to get_volume_range.
        supportedSizeDict = (
            self.get_volume_range(
                conn, storageConfigService, srpPoolInstanceName,
                arrayInfo['SLO'], arrayInfo['Workload'],
                None))
        try:
            # Information source is V3.
            if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
                remainingCapacityGb = self.utils.convert_bits_to_gbs(
                    supportedSizeDict['EMCRemainingSLOCapacity'])
                LOG.debug("Received remaining SLO Capacity "
                          "%(remainingCapacityGb)s GBs for SLO "
                          "%(SLO)s and workload %(workload)s.",
                          {'remainingCapacityGb': remainingCapacityGb,
                           'SLO': arrayInfo['SLO'],
                           'workload': arrayInfo['Workload']})
        except KeyError:
            # Keys are absent when WLP data is not available; keep the
            # -1 sentinel in that case.
            pass
        return remainingCapacityGb
    def extend_volume_in_SG(
            self, conn, storageConfigService, volumeInstanceName,
            volumeName, volumeSize, extraSpecs):
        """Extend a volume instance.

        :param conn: connection to the ecom server
        :param storageConfigService: the storage configuration service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param volumeSize: the volume size
        :param extraSpecs: additional info
        :returns: volumeDict
        :returns: int -- return code
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()
        # TheElement selects an existing volume, so this call resizes it
        # rather than creating a new one.
        rc, job = conn.InvokeMethod(
            'CreateOrModifyElementFromStoragePool',
            storageConfigService, TheElement=volumeInstanceName,
            Size=self.utils.get_num(volumeSize, '64'))
        LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
                  {'volumename': volumeName,
                   'rc': rc})
        if rc != 0:
            # A non-zero code may indicate an asynchronous job; wait for
            # it to complete and re-check the final status.
            rc, error_desc = self.utils.wait_for_job_complete(conn, job,
                                                              extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Extend Volume: %(volumeName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'volumeName': volumeName,
                       'rc': rc,
                       'error': error_desc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)
        LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(startTime,
                                                      time.time())})
        # Find the newly created volume.
        volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
        return volumeDict, rc
| apache-2.0 |
joel-airspring/Diamond | src/collectors/s3/s3.py | 61 | 2360 | # coding=utf-8
"""
The S3BucketCollector collects bucket size using boto
#### Dependencies
* boto (https://github.com/boto/boto)
"""
import diamond.collector
try:
import boto
boto
from boto.s3.connection import S3Connection
except ImportError:
boto = None
class S3BucketCollector(diamond.collector.Collector):
    """Collects the total size of configured S3 buckets via boto."""

    def get_default_config_help(self):
        """Return help text for this collector's configuration options."""
        config_help = super(S3BucketCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(S3BucketCollector, self).get_default_config()
        config.update({
            'path': 'aws.s3',
            'byte_unit': 'byte'
        })
        return config

    def getBucket(self, aws_access, aws_secret, bucket_name):
        """Open an S3 connection and look up the named bucket.

        Security fix: the AWS secret key was previously written to the
        log; credentials must never reach log files, so only the bucket
        name and access key id are logged now.
        """
        self.log.info("S3: Open Bucket, %s, %s" % (bucket_name, aws_access))
        s3 = S3Connection(aws_access, aws_secret)
        return s3.lookup(bucket_name)

    def getBucketSize(self, bucket):
        """Sum the sizes of every key in the bucket, in bytes."""
        total_bytes = 0
        for key in bucket:
            total_bytes += key.size
        return total_bytes

    def collect(self):
        """
        Collect s3 bucket stats
        """
        if boto is None:
            self.log.error("Unable to import boto python module")
            return {}
        for s3instance in self.config['s3']:
            self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
            aws_access = self.config['s3'][s3instance]['aws_access_key']
            aws_secret = self.config['s3'][s3instance]['aws_secret_key']
            for bucket_name in self.config['s3'][s3instance]['buckets']:
                bucket = self.getBucket(aws_access, aws_secret, bucket_name)
                # Publish the bucket size converted into each configured
                # byte unit.
                total_size = self.getBucketSize(bucket)
                for byte_unit in self.config['byte_unit']:
                    new_size = diamond.convertor.binary.convert(
                        value=total_size,
                        oldUnit='byte',
                        newUnit=byte_unit
                    )
                    self.publish("%s.size.%s" % (bucket_name, byte_unit),
                                 new_size)
| mit |
yanchen036/tensorflow | tensorflow/python/framework/versions_test.py | 164 | 2130 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exposed tensorflow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import versions
from tensorflow.python.platform import test
class VersionTest(test.TestCase):
  """Sanity checks on the version constants exported by TensorFlow."""

  def testVersion(self):
    # Both the dunder name and the legacy alias must be plain strings.
    self.assertEqual(type(versions.__version__), str)
    self.assertEqual(type(versions.VERSION), str)
    # This pattern will need to grow as we include alpha, builds, etc.
    self.assertRegexpMatches(versions.__version__,
                             r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
    self.assertRegexpMatches(versions.VERSION,
                             r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')

  def testGraphDefVersion(self):
    # GraphDef versioning invariants checked below:
    # 0 <= min_consumer, 0 <= min_producer <= version.
    version = versions.GRAPH_DEF_VERSION
    min_consumer = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
    min_producer = versions.GRAPH_DEF_VERSION_MIN_PRODUCER
    for v in version, min_consumer, min_producer:
      self.assertEqual(type(v), int)
    self.assertLessEqual(0, min_consumer)
    self.assertLessEqual(0, min_producer)
    self.assertLessEqual(min_producer, version)

  def testGitAndCompilerVersion(self):
    # Git/compiler identifiers are strings; their content is
    # build-dependent, so only the type is asserted.
    self.assertEqual(type(versions.__git_version__), str)
    self.assertEqual(type(versions.__compiler_version__), str)
    self.assertEqual(type(versions.GIT_VERSION), str)
    self.assertEqual(type(versions.COMPILER_VERSION), str)
if __name__ == '__main__':
test.main()
| apache-2.0 |
guijomatos/SickRage | sickrage/media/ShowBanner.py | 15 | 1291 | # This file is part of SickRage.
#
# URL: https://www.sickrage.tv
# Git: https://github.com/SiCKRAGETV/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from sickbeard.image_cache import ImageCache
from sickrage.media.GenericMedia import GenericMedia
class ShowBanner(GenericMedia):
    """
    Media provider that resolves the cached banner image of a show.
    """

    def get_default_media_name(self):
        """Name of the fallback asset used when no banner is available."""
        return 'banner.png'

    def get_media_path(self):
        """Return the cached banner path for this show, or '' if unknown."""
        # Guard clause: no show means no banner to resolve.
        if not self.get_show():
            return ''
        if self.media_format == 'normal':
            return ImageCache().banner_path(self.indexer_id)
        if self.media_format == 'thumb':
            return ImageCache().banner_thumb_path(self.indexer_id)
        return ''
| gpl-3.0 |
samdowd/drumm-farm | drumm_env/lib/python2.7/site-packages/setuptools/command/build_clib.py | 314 | 4484 | import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:
    1. Implement a rudimentary timestamp-based dependency system
    so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
    * obj_deps - specify dependencies for each object compiled.
    this should be a dictionary mapping a key
    with the source filename to a list of
    dependencies. Use an empty string for global
    dependencies.
    * cflags - specify a list of additional flags to pass to
    the compiler.
    """

    def build_libraries(self, libraries):
        """Validate each library's build_info and (re)compile only when
        an expected object is older than any of its dependencies."""
        for (lib_name, build_info) in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'sources' must be present and must be "
                    "a list of source filenames" % lib_name)
            sources = list(sources)
            log.info("building '%s' library", lib_name)
            # Make sure everything is the correct type.
            # obj_deps should be a dictionary of keys as sources
            # and a list/tuple of files that are its dependencies.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)
            dependencies = []
            # Get the global dependencies that are specified by the '' key.
            # These will go into every source's dependency list.
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    "in 'libraries' option (library '%s'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'" % lib_name)
            # Build the list to be used by newer_pairwise_group
            # each source will be auto-added to its dependencies.
            for source in sources:
                src_deps = [source]
                src_deps.extend(global_deps)
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        "in 'libraries' option (library '%s'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'" % lib_name)
                src_deps.extend(extra_deps)
                dependencies.append(src_deps)
            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp
            )
            # Skip compilation entirely when every object is newer than
            # all of its dependencies (timestamp-based rebuild check).
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                # First, compile the source code to object files in the library
                # directory. (This should probably change to putting object
                # files in a temporary build directory.)
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                objects = self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    extra_postargs=cflags,
                    debug=self.debug
                )
                # Now "link" the object files together into a static library.
                # (On Unix at least, this isn't really linking -- it just
                # builds an archive. Whatever.)
                self.compiler.create_static_lib(
                    expected_objects,
                    lib_name,
                    output_dir=self.build_clib,
                    debug=self.debug
                )
| mit |
angel511wong/nixysa | third_party/ply-3.1/test/yacc_unused.py | 174 | 1669 | # -----------------------------------------------------------------------------
# yacc_unused.py
#
# A grammar with an unused rule
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    # Bind the evaluated expression to NAME in the global symbol table.
    # NOTE: the docstring above is the yacc grammar rule -- do not edit it.
    names[t[1]] = t[3]
def p_statement_expr(t):
    'statement : expression'
    # A bare expression statement prints its evaluated value.
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Dispatch on the operator token text. Lambdas keep the original
    # numeric semantics (including native '/' division behavior).
    apply_op = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }[t[2]]
    t[0] = apply_op(t[1], t[3])
def p_expression_uminus(t):
    'expression : MINUS expression %prec UMINUS'
    # %prec UMINUS gives unary minus the precedence declared in the
    # module-level precedence table.
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # A parenthesized expression evaluates to its inner expression.
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    # A numeric literal carries its value in the token.
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # Look the identifier up in the global symbol table; undefined
    # names evaluate to 0 after the error is reported.
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_expr_list(t):
    'exprlist : exprlist COMMA expression'
    # Deliberately unreferenced nonterminal: this grammar exists to
    # exercise yacc's unused-rule diagnostics (see module header).
    pass
def p_expr_list_2(t):
    'exprlist : expression'
    # Base case of the intentionally unused 'exprlist' nonterminal.
    pass
def p_error(t):
    # yacc invokes this with the offending token on a syntax error.
    print("Syntax error at '%s'" % t.value)
yacc.yacc()
| apache-2.0 |
guorendong/iridium-browser-ubuntu | third_party/webpagereplay/proxyshaper_test.py | 31 | 4988 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for proxyshaper.
Usage:
$ ./proxyshaper_test.py
"""
import proxyshaper
import StringIO
import unittest
# pylint: disable=bad-whitespace
# Table of rate strings accepted by proxyshaper.GetBitsPerSecond and the
# bits-per-second value each should convert to. Bare ints pass through.
VALID_RATES = (
    # input, expected_bps
    ( '384Kbit/s', 384000),
    ('1536Kbit/s', 1536000),
    ( '1Mbit/s', 1000000),
    ( '5Mbit/s', 5000000),
    ( '2MByte/s', 16000000),
    ( '0', 0),
    ( '5', 5),
    ( 384000, 384000),
    )
# Rate strings that must raise BandwidthValueError.
ERROR_RATES = (
    '1536KBit/s', # Older versions of dummynet used capital 'B' for bytes.
    '1Mbyte/s', # Require capital 'B' for bytes.
    '5bps',
    )
class TimedTestCase(unittest.TestCase):
    """TestCase with a tolerance-based comparison helper for timing tests."""

    def assertValuesAlmostEqual(self, expected, actual, tolerance=0.05):
        """Like the following with nicer default message:
             assertTrue(expected <= actual + tolerance &&
                        expected >= actual - tolerance)
        """
        # The allowed deviation is a fraction of the expected value.
        allowed = tolerance * expected
        upper = expected + allowed
        lower = expected - allowed
        if actual > upper or actual < lower:
            self.fail('%s is not equal to expected %s +/- %s%%' % (
                actual, expected, 100 * tolerance))
class RateLimitedFileTest(TimedTestCase):
    """Verifies that RateLimitedFile throttles I/O to the configured rate,
    and that the available bandwidth is divided by the number of concurrent
    requests reported by the request counter."""
    def testReadLimitedBasic(self):
        # 1KB read at 384Kbit/s with a single in-flight request.
        num_bytes = 1024
        bps = 384000
        request_counter = lambda: 1
        f = StringIO.StringIO(' ' * num_bytes)
        limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
        start = proxyshaper.TIMER()
        self.assertEqual(num_bytes, len(limited_f.read()))
        # Expected duration (ms) = bits transferred / bits-per-second.
        expected_ms = 8.0 * num_bytes / bps * 1000.0
        actual_ms = (proxyshaper.TIMER() - start) * 1000.0
        self.assertValuesAlmostEqual(expected_ms, actual_ms)
    def testReadlineLimitedBasic(self):
        # readline() must be throttled the same way as read().
        num_bytes = 1024 * 8 + 512
        bps = 384000
        request_counter = lambda: 1
        f = StringIO.StringIO(' ' * num_bytes)
        limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
        start = proxyshaper.TIMER()
        self.assertEqual(num_bytes, len(limited_f.readline()))
        expected_ms = 8.0 * num_bytes / bps * 1000.0
        actual_ms = (proxyshaper.TIMER() - start) * 1000.0
        self.assertValuesAlmostEqual(expected_ms, actual_ms)
    def testReadLimitedSlowedByMultipleRequests(self):
        # With two concurrent requests each gets half the bandwidth, so the
        # read is expected to take twice as long.
        num_bytes = 1024
        bps = 384000
        request_count = 2
        request_counter = lambda: request_count
        f = StringIO.StringIO(' ' * num_bytes)
        limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
        start = proxyshaper.TIMER()
        num_read_bytes = limited_f.read()
        self.assertEqual(num_bytes, len(num_read_bytes))
        expected_ms = 8.0 * num_bytes / (bps / float(request_count)) * 1000.0
        actual_ms = (proxyshaper.TIMER() - start) * 1000.0
        self.assertValuesAlmostEqual(expected_ms, actual_ms)
    def testWriteLimitedBasic(self):
        # Writes are throttled just like reads.
        num_bytes = 1024 * 10 + 350
        bps = 384000
        request_counter = lambda: 1
        f = StringIO.StringIO()
        limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
        start = proxyshaper.TIMER()
        limited_f.write(' ' * num_bytes)
        self.assertEqual(num_bytes, len(limited_f.getvalue()))
        expected_ms = 8.0 * num_bytes / bps * 1000.0
        actual_ms = (proxyshaper.TIMER() - start) * 1000.0
        self.assertValuesAlmostEqual(expected_ms, actual_ms)
    def testWriteLimitedSlowedByMultipleRequests(self):
        # Two concurrent requests halve the write bandwidth as well.
        num_bytes = 1024 * 10
        bps = 384000
        request_count = 2
        request_counter = lambda: request_count
        f = StringIO.StringIO(' ' * num_bytes)
        limited_f = proxyshaper.RateLimitedFile(request_counter, f, bps)
        start = proxyshaper.TIMER()
        limited_f.write(' ' * num_bytes)
        self.assertEqual(num_bytes, len(limited_f.getvalue()))
        expected_ms = 8.0 * num_bytes / (bps / float(request_count)) * 1000.0
        actual_ms = (proxyshaper.TIMER() - start) * 1000.0
        self.assertValuesAlmostEqual(expected_ms, actual_ms)
class GetBitsPerSecondTest(unittest.TestCase):
    """Verifies conversion of dummynet-style rate strings to bits/second."""
    def testConvertsValidValues(self):
        # Every entry in VALID_RATES must convert to its expected bps value.
        for dummynet_option, expected_bps in VALID_RATES:
            bps = proxyshaper.GetBitsPerSecond(dummynet_option)
            self.assertEqual(
                expected_bps, bps, 'Unexpected result for %s: %s != %s' % (
                    dummynet_option, expected_bps, bps))
    def testRaisesOnUnexpectedValues(self):
        # Malformed rate strings must raise BandwidthValueError.
        for dummynet_option in ERROR_RATES:
            self.assertRaises(proxyshaper.BandwidthValueError,
                              proxyshaper.GetBitsPerSecond, dummynet_option)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
chanchett/ds3_python_sdk_ | ds3/ds3.py | 1 | 30418 | # Copyright 2014-2015 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ctypes import *
import libds3
def checkExistence(obj, wrapper = lambda ds3Str: ds3Str.contents.value):
    """Return wrapper(obj) when obj is truthy (e.g. a non-NULL ctypes
    pointer), otherwise None. The default wrapper dereferences a ds3_str
    pointer to its Python value."""
    return wrapper(obj) if obj else None
def arrayToList(array, length, wrapper = lambda ds3Str: ds3Str.contents.value):
    """Convert the first `length` entries of a ctypes array into a Python
    list, applying `wrapper` to each entry (by default, dereference a
    ds3_str pointer to its value).

    Uses range() rather than the Python2-only xrange() so the helper works
    under both Python 2 and Python 3.
    """
    return [wrapper(array[i]) for i in range(length)]
class Ds3Error(Exception):
    """Exception raised when a libds3 call reports an error.

    Captures the low-level reason string and, when the server produced an
    HTTP error response, the status code/message and error body. Frees the
    underlying libds3 error structure on construction.
    """
    def __init__(self, libds3Error):
        self.reason = libds3Error.contents.message.contents.value
        response = libds3Error.contents.error
        # Defaults for the no-HTTP-response case.
        self._hasResponse = False
        self.statusCode = None
        self.statusMessage = None
        self.message = None
        if response:
            self._hasResponse = True
            self.statusCode = response.contents.status_code
            self.statusMessage = response.contents.status_message.contents.value
            self.message = checkExistence(response.contents.error_body)
        # The ctypes error struct is owned by us; release it now.
        libds3.lib.ds3_free_error(libds3Error)
    def __str__(self):
        parts = ["Reason: " + self.reason]
        if self._hasResponse:
            parts.append("StatusCode: " + str(self.statusCode))
            parts.append("StatusMessage: " + self.statusMessage)
        if self.message:
            parts.append("Message: " + self.message)
        return " | ".join(parts)
    def __repr__(self):
        return self.__str__()
class Credentials(object):
    """Access-id / secret-key pair used to authenticate with a DS3 endpoint."""
    def __init__(self, accessKey, secretKey):
        self.accessKey = accessKey
        self.secretKey = secretKey
class Ds3Bucket(object):
    """A single bucket entry from a GET Service listing."""
    def __init__(self, ds3Bucket):
        # ds3Bucket is a libds3 bucket struct (not a pointer).
        self.name = ds3Bucket.name.contents.value
        self.creationDate = ds3Bucket.creation_date.contents.value
    def __str__(self):
        return "Name: " + self.name + " | Creation Date: " + self.creationDate
    def __repr__(self):
        return str(self)
class Ds3Owner(object):
    """Owner (name and id) of a bucket or object."""
    def __init__(self, ds3Owner):
        # ds3Owner is a ctypes pointer; dereference once up front.
        owner = ds3Owner.contents
        self.name = owner.name.contents.value
        self.id = owner.id.contents.value
    def __str__(self):
        return "Name: " + self.name + " | ID: " + self.id
    def __repr__(self):
        return str(self)
class Ds3Object(object):
    """A single object entry from a bucket listing."""
    def __init__(self, ds3Object):
        self.name = ds3Object.name.contents.value
        # etag may be NULL for some listings.
        self.etag = checkExistence(ds3Object.etag)
        self.size = ds3Object.size
        self.owner = Ds3Owner(ds3Object.owner)
    def __str__(self):
        return ("Name: " + self.name
                + " | Size: " + str(self.size)
                + " | Etag: " + str(self.etag)
                + " | Owner: " + str(self.owner))
    def __repr__(self):
        return str(self)
class Ds3BucketDetails(object):
    """Full result of a GET Bucket call: listing options echoed back,
    pagination state, common prefixes and the contained objects."""
    def __init__(self, ds3Bucket):
        contents = ds3Bucket.contents
        self.name = contents.name.contents.value
        # Optional fields come back as NULL pointers when absent.
        self.creationDate = checkExistence(contents.creation_date)
        self.isTruncated = bool(contents.is_truncated)
        self.marker = checkExistence(contents.marker)
        self.delimiter = checkExistence(contents.delimiter)
        self.maxKeys = contents.max_keys
        self.nextMarker = checkExistence(contents.next_marker)
        self.prefix = checkExistence(contents.prefix)
        self.commonPrefixes = arrayToList(contents.common_prefixes, contents.num_common_prefixes)
        self.objects = arrayToList(contents.objects, contents.num_objects, wrapper = Ds3Object)
class Ds3BulkObject(object):
    """One object within a bulk-job chunk (name, byte range, cache state)."""
    def __init__(self, bulkObject):
        self.name = bulkObject.name.contents.value
        self.length = bulkObject.length
        self.offset = bulkObject.offset
        self.inCache = bool(bulkObject.in_cache)
    def __str__(self):
        return ("Name:" + self.name
                + " | Length: " + str(self.length)
                + " | Offset: " + str(self.offset)
                + " | InCache: " + str(self.inCache))
    def __repr__(self):
        return str(self)
class Ds3CacheList(object):
    """One chunk of a bulk job: its id, placement info and object list."""
    def __init__(self, bulkObjectList):
        chunk = bulkObjectList.contents
        self.chunkNumber = chunk.chunk_number
        # Node/server ids are optional (NULL) on some responses.
        self.nodeId = checkExistence(chunk.node_id)
        self.serverId = checkExistence(chunk.server_id)
        self.chunkId = chunk.chunk_id.contents.value
        self.objects = arrayToList(chunk.list, chunk.size, wrapper = Ds3BulkObject)
class Ds3BulkPlan(object):
    """Description of a bulk job: owning user/bucket, status, progress
    counters and the list of chunks to process."""
    def __init__(self, ds3BulkResponse):
        contents = ds3BulkResponse.contents
        self.bucketName = checkExistence(contents.bucket_name)
        if contents.cached_size_in_bytes:
            self.cachedSize = contents.cached_size_in_bytes
        if contents.completed_size_in_bytes:
            # Keep the historically misspelled attribute for backward
            # compatibility, but also expose the correctly spelled name.
            self.compltedSize = contents.completed_size_in_bytes
            self.completedSize = contents.completed_size_in_bytes
        self.jobId = checkExistence(contents.job_id)
        if contents.original_size_in_bytes:
            self.originalSize = contents.original_size_in_bytes
        self.startDate = checkExistence(contents.start_date)
        self.userId = checkExistence(contents.user_id)
        self.userName = checkExistence(contents.user_name)
        self.requestType = contents.request_type
        self.status = contents.status
        self.chunks = arrayToList(contents.list, contents.list_size, wrapper = Ds3CacheList)
    def __str__(self):
        # str() around every field: jobId/bucketName/userName may be None
        # (checkExistence), and "str" + None raised TypeError before.
        response = "JobId: " + str(self.jobId)
        response += " | Status: " + str(self.status)
        response += " | Request Type: " + str(self.requestType)
        response += " | BucketName: " + str(self.bucketName)
        response += " | UserName: " + str(self.userName)
        response += " | Chunks: " + str(self.chunks)
        return response
    def __repr__(self):
        return self.__str__()
class Ds3AllocateChunkResponse(object):
    """Result of an Allocate Chunk call: retry delay plus the chunk itself."""
    def __init__(self, ds3AllocateChunkResponse):
        resp = ds3AllocateChunkResponse.contents
        self.retryAfter = resp.retry_after
        self.chunk = Ds3CacheList(resp.objects)
class Ds3AvailableChunksResponse(object):
    """Result of a Get Available Chunks call: retry delay plus job plan."""
    def __init__(self, ds3AvailableChunksResponse):
        resp = ds3AvailableChunksResponse.contents
        self.retryAfter = resp.retry_after
        self.bulkPlan = Ds3BulkPlan(resp.object_list)
class Ds3SearchObject(object):
    """One result row from a GET objects (search) call."""
    def __init__(self, ds3SearchObject):
        contents = ds3SearchObject.contents
        self.bucketId = checkExistence(contents.bucket_id)
        self.id = checkExistence(contents.id)
        self.name = checkExistence(contents.name)
        self.size = contents.size
        self.owner = checkExistence(contents.owner, wrapper = Ds3Owner)
        self.lastModified = checkExistence(contents.last_modified)
        self.storageClass = checkExistence(contents.storage_class)
        self.type = checkExistence(contents.type)
        self.version = checkExistence(contents.version)
    def __str__(self):
        response = "BucketId: " + str(self.bucketId)
        response += " | Id: " + str(self.id)
        response += " | Name: " + str(self.name)
        response += " | Size: " + str(self.size)
        # Bug fix: this previously printed self.id instead of self.owner.
        response += " | Owner: (" + str(self.owner) + ")"
        response += " | LastModified: " + str(self.lastModified)
        response += " | StorageClass: " + str(self.storageClass)
        response += " | Type: " + str(self.type)
        response += " | Version: " + str(self.version)
        return response
    def __repr__(self):
        # Added for consistency with the other response wrappers here.
        return self.__str__()
class Ds3BuildInformation(object):
    """Build metadata (branch/revision/version) of the DS3 endpoint."""
    def __init__(self, ds3BuildInfo):
        info = ds3BuildInfo.contents
        self.branch = checkExistence(info.branch)
        self.revision = checkExistence(info.revision)
        self.version = checkExistence(info.version)
    def __str__(self):
        fields = ("Branch: " + str(self.branch),
                  "Revision: " + str(self.revision),
                  "Version: " + str(self.version))
        return " | ".join(fields)
class Ds3SystemInformation(object):
    """System metadata of the DS3 endpoint (API version, serial, build)."""
    def __init__(self, ds3SystemInfo):
        info = ds3SystemInfo.contents
        self.apiVersion = checkExistence(info.api_version)
        self.serialNumber = checkExistence(info.serial_number)
        self.buildInformation = checkExistence(info.build_information, wrapper = Ds3BuildInformation)
    def __str__(self):
        fields = ("API Version: " + str(self.apiVersion),
                  "Serial Number: " + str(self.serialNumber),
                  "Build Information: " + str(self.buildInformation))
        return " | ".join(fields)
class Ds3SystemHealthInformation(object):
    """Health-check result: how long the data planner verification took."""
    def __init__(self, ds3HealthInfo):
        info = ds3HealthInfo.contents
        self.msRequiredToVerifyDataPlannerHealth = info.ms_required_to_verify_data_planner_health
def typeCheck(input_arg, type_to_check):
    """Return input_arg unchanged if it is an instance of type_to_check,
    otherwise raise TypeError describing the mismatch."""
    if not isinstance(input_arg, type_to_check):
        raise TypeError("expected instance of type " + type_to_check.__name__ + ", got instance of type " + type(input_arg).__name__)
    return input_arg
def typeCheckString(input_arg):
    """Type-check that input_arg is a string and return it unchanged.

    The original referenced the Python2-only ``basestring`` unconditionally;
    fall back to ``str`` when it does not exist (Python 3).
    """
    try:
        stringType = basestring
    except NameError:
        stringType = str
    return typeCheck(input_arg, stringType)
def enumCheck(input_arg, enum_dict):
    """Map input_arg through enum_dict, raising TypeError with the list of
    valid values when it is not a recognized key."""
    if input_arg not in enum_dict:
        raise TypeError("expected value to be one of " + str(enum_dict.keys()) + ", got " + str(input_arg))
    return enum_dict[input_arg]
def enumCheckDs3ObjectType(input_arg):
    """Map a DS3 object-type name ("DATA" or "FOLDER") to its wire value."""
    return enumCheck(input_arg, {"FOLDER": 1, "DATA": 0})
def addMetadataToRequest(request, metadata):
    """Attach user metadata entries to an outgoing libds3 request.

    metadata is a dict whose values are either single values or
    lists/tuples of values; each value becomes one metadata entry.
    """
    if not metadata:
        return
    for key in metadata:
        value = metadata[key]
        # isinstance (rather than `type(...) is`) also accepts subclasses
        # of list/tuple; behavior is otherwise unchanged.
        if isinstance(value, (list, tuple)):
            for item in value:
                libds3.lib.ds3_request_set_metadata(request, key, item)
        else:
            libds3.lib.ds3_request_set_metadata(request, key, value)
def extractMetadataFromResponse(metaData):
    """Convert a libds3 metadata handle into a dict of key -> list of values.

    Frees each metadata entry and the key list as it goes; the caller is
    still responsible for freeing the metadata handle itself.
    """
    result = {}
    keys = libds3.lib.ds3_metadata_keys(metaData)
    if keys:
        # range (not the Python2-only xrange) keeps this working on Python 3.
        for key_index in range(0, keys.contents.num_keys):
            key = keys.contents.keys[key_index].contents.value
            metadataEntry = libds3.lib.ds3_metadata_get_entry(metaData, key)
            result[key] = arrayToList(metadataEntry.contents.values, metadataEntry.contents.num_values)
            libds3.lib.ds3_free_metadata_entry(metadataEntry)
        libds3.lib.ds3_free_metadata_keys(keys)
    return result
def createClientFromEnv():
    """Build a Ds3Client from the DS3_* environment variables via libds3.

    Raises Ds3Error if the environment is not configured. The temporary
    libds3 client/creds structures are freed before returning; the returned
    Ds3Client holds Python copies of the endpoint, credentials and proxy.
    """
    libDs3Client = POINTER(libds3.LibDs3Client)()
    error = libds3.lib.ds3_create_client_from_env(byref(libDs3Client))
    if error:
        raise Ds3Error(error)
    clientContents = libDs3Client.contents
    clientCreds = clientContents.creds.contents
    creds = Credentials(clientCreds.access_id.contents.value, clientCreds.secret_key.contents.value)
    # proxy may be NULL; checkExistence maps that to None.
    proxyValue = checkExistence(clientContents.proxy)
    client = Ds3Client(clientContents.endpoint.contents.value, creds, proxyValue)
    # Free creds first, then the client struct that references them.
    libds3.lib.ds3_free_creds(clientContents.creds)
    libds3.lib.ds3_free_client(libDs3Client)
    return client
class Ds3Client(object):
    '''
    This object is used to communicate with a remote DS3/Spectra S3 endpoint. All communication with the Spectra S3 API is done with this class.
    '''
    def __init__(self, endpoint, credentials, proxy = None):
        # The ctypes credential/client handles are owned by this object.
        # NOTE(review): `proxy` is accepted but not forwarded to libds3 here
        # - confirm whether proxy support is wired up elsewhere.
        self._ds3Creds = libds3.lib.ds3_create_creds(c_char_p(credentials.accessKey), c_char_p(credentials.secretKey))
        self._client = libds3.lib.ds3_create_client(c_char_p(endpoint), self._ds3Creds)
        self.credentials = credentials
    def verifySystemHealth(self):
        '''
        Returns how long it took to verify the health of the system. In the event that the system is in a bad state, an error will
        be thrown.
        '''
        response = POINTER(libds3.LibDs3VerifySystemHealthResponse)()
        request = libds3.lib.ds3_init_verify_system_health()
        error = libds3.lib.ds3_verify_system_health(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = Ds3SystemHealthInformation(response)
        libds3.lib.ds3_free_verify_system_health(response)
        return result
    def getService(self):
        '''
        Returns a list of all the buckets the current access id has access to.
        '''
        response = POINTER(libds3.LibDs3GetServiceResponse)()
        request = libds3.lib.ds3_init_get_service()
        error = libds3.lib.ds3_get_service(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        contents = response.contents
        # range (not Python2-only xrange) for Python 3 compatibility.
        # NOTE: this is a generator; the response struct is freed only once
        # the caller exhausts it.
        for i in range(0, response.contents.num_buckets):
            yield Ds3Bucket(contents.buckets[i])
        libds3.lib.ds3_free_service_response(response)
    def getBucket(self, bucketName, prefix = None, nextMarker = None, delimiter = None, maxKeys = None):
        '''
        Returns a list of all the objects in a specific bucket as specified by `bucketName`. This will return at most 1000 objects.
        In order to retrieve more, pagination must be used. The `nextMarker` is used to specify where the next 1000 objects will
        start listing from.
        `delimiter` can be used to list objects like directories. So for example, if delimiter is set to '/' then it will return
        a list of 'directories' in the common prefixes field in the response. In order to list all the files in that directory use the prefix parameter.
        For example:
            client.getBucket("my_bucket", prefix = 'dir', delimiter = '/')
        The above will list any files and directories that are in the 'dir' directory.
        '''
        response = POINTER(libds3.LibDs3GetBucketResponse)()
        request = libds3.lib.ds3_init_get_bucket(typeCheckString(bucketName))
        # Each optional listing parameter is only forwarded when supplied.
        if prefix:
            libds3.lib.ds3_request_set_prefix(request, typeCheckString(prefix))
        if nextMarker:
            libds3.lib.ds3_request_set_marker(request, nextMarker)
        if delimiter:
            libds3.lib.ds3_request_set_delimiter(request, typeCheckString(delimiter))
        if maxKeys:
            libds3.lib.ds3_request_set_max_keys(request, maxKeys)
        error = libds3.lib.ds3_get_bucket(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        bucket = Ds3BucketDetails(response)
        libds3.lib.ds3_free_bucket_response(response)
        return bucket
    def headObject(self, bucketName, objectName):
        '''
        Returns the metadata for the retrieved object as a dictionary of lists. If the object does not exist
        an error is thrown with a status code of 404.
        '''
        response = POINTER(libds3.LibDs3Metadata)()
        request = libds3.lib.ds3_init_head_object(typeCheckString(bucketName), typeCheckString(objectName))
        error = libds3.lib.ds3_head_object(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        metadata = extractMetadataFromResponse(response)
        libds3.lib.ds3_free_metadata(response)
        return metadata
    def headBucket(self, bucketName):
        '''
        Checks whether a bucket exists.
        '''
        request = libds3.lib.ds3_init_head_bucket(typeCheckString(bucketName))
        error = libds3.lib.ds3_head_bucket(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def deleteFolder(self, bucketName, folderName):
        '''
        Deletes a folder and all the objects contained within it.
        '''
        request = libds3.lib.ds3_init_delete_folder(typeCheckString(bucketName), typeCheckString(folderName))
        error = libds3.lib.ds3_delete_folder(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def getSystemInformation(self):
        '''
        Returns the version and other information about the Spectra S3 endpoint.
        '''
        response = POINTER(libds3.LibDs3GetSystemInformationResponse)()
        request = libds3.lib.ds3_init_get_system_information()
        error = libds3.lib.ds3_get_system_information(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = Ds3SystemInformation(response)
        libds3.lib.ds3_free_get_system_information(response)
        return result
    def getObject(self, bucketName, objectName, offset, jobId, realFileName = None):
        '''
        Gets an object from the Spectra S3 endpoint. Use `realFileName` when the `objectName`
        that you are getting from Spectra S3 does not match what will be on the local filesystem.
        Returns the metadata for the retrieved object as a dictionary, where keys are
        associated with a list of the values for that key.
        This can only be used within the context of a Bulk Get Job.
        '''
        objectName = typeCheckString(objectName)
        effectiveFileName = objectName
        if realFileName:
            effectiveFileName = typeCheckString(realFileName)
        response = POINTER(libds3.LibDs3Metadata)()
        request = libds3.lib.ds3_init_get_object_for_job(typeCheckString(bucketName), objectName, offset, jobId)
        # Binary mode: the C library writes raw object bytes to the fd, so
        # no text/newline translation must happen.
        localFile = open(effectiveFileName, "wb")
        error = libds3.lib.ds3_get_object_with_metadata(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_write_to_fd, byref(response))
        localFile.close()
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        metadata = extractMetadataFromResponse(response)
        libds3.lib.ds3_free_metadata(response)
        return metadata
    def putBucket(self, bucketName):
        '''
        Creates a new bucket where objects can be stored.
        '''
        bucketName = typeCheckString(bucketName)
        request = libds3.lib.ds3_init_put_bucket(bucketName)
        error = libds3.lib.ds3_put_bucket(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def putObject(self, bucketName, objectName, offset, size, jobId, realFileName = None, metadata = None):
        '''
        Puts an object to the Spectra S3 endpoint. Use `realFileName` when the `objectName`
        that you are putting to Spectra S3 does not match what is on the local filesystem.
        Use metadata to set the metadata for the object. metadata's value should be
        a dictionary, where keys are associated with either a value or a list of the
        values for that key.
        This can only be used within the context of a Spectra S3 Bulk Put job.
        '''
        objectName = typeCheckString(objectName)
        effectiveFileName = objectName
        if realFileName:
            effectiveFileName = typeCheckString(realFileName)
        request = libds3.lib.ds3_init_put_object_for_job(typeCheckString(bucketName), objectName, offset, size, jobId)
        addMetadataToRequest(request, metadata)
        # Binary mode: the C library reads raw bytes from the fd.
        localFile = open(effectiveFileName, "rb")
        error = libds3.lib.ds3_put_object(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_read_from_fd)
        localFile.close()
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def deleteObject(self, bucketName, objName):
        '''
        Deletes an object from the specified bucket. If deleting several files at once, use `deleteObjects` instead.
        '''
        request = libds3.lib.ds3_init_delete_object(typeCheckString(bucketName), typeCheckString(objName))
        error = libds3.lib.ds3_delete_object(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def deleteObjects(self, bucketName, fileNameList):
        '''
        Deletes multiple objects from the bucket using a single API call.
        '''
        bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
        request = libds3.lib.ds3_init_delete_objects(typeCheckString(bucketName))
        error = libds3.lib.ds3_delete_objects(self._client, request, bulkObjs)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def deleteBucket(self, bucketName):
        '''
        Deletes a bucket. If the bucket is not empty, then this request will fail. All objects must be deleted first
        before the bucket can be deleted.
        '''
        request = libds3.lib.ds3_init_delete_bucket(typeCheckString(bucketName))
        error = libds3.lib.ds3_delete_bucket(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def putBulk(self, bucketName, fileInfoList):
        '''
        Initiates a start bulk put with the remote Spectra S3 endpoint. The `fileInfoList` is a list of (objectName, size) tuples.
        `objectName` does not have to be the actual name on the local file system, but it will be the name that you must
        initiate a single object put to later. `size` must reflect the actual size of the file that is being put.
        '''
        bulkObjs = libds3.lib.ds3_init_bulk_object_list(len(fileInfoList))
        bulkObjsList = bulkObjs.contents.list
        # range (not Python2-only xrange) for Python 3 compatibility.
        for i in range(0, len(fileInfoList)):
            bulkObjsList[i].name = libds3.lib.ds3_str_init(fileInfoList[i][0])
            bulkObjsList[i].length = fileInfoList[i][1]
        response = POINTER(libds3.LibDs3BulkResponse)()
        request = libds3.lib.ds3_init_put_bulk(typeCheckString(bucketName), bulkObjs)
        error = libds3.lib.ds3_bulk(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        bulkResponse = Ds3BulkPlan(response)
        libds3.lib.ds3_free_bulk_response(response)
        return bulkResponse
    def getBulk(self, bucketName, fileNameList, chunkOrdering = True):
        '''
        Initiates a start bulk get with the remote Spectra S3 endpoint. All the files that will be retrieved must be specified in
        `fileNameList`.
        '''
        bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
        response = POINTER(libds3.LibDs3BulkResponse)()
        chunkOrderingValue = libds3.LibDs3ChunkOrdering.IN_ORDER
        if not chunkOrdering:
            chunkOrderingValue = libds3.LibDs3ChunkOrdering.NONE
        request = libds3.lib.ds3_init_get_bulk(typeCheckString(bucketName), bulkObjs, chunkOrderingValue)
        error = libds3.lib.ds3_bulk(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        bulkResponse = Ds3BulkPlan(response)
        libds3.lib.ds3_free_bulk_response(response)
        return bulkResponse
    def getObjects(self, bucketName = None, creationDate = None, objId = None, name = None, pageLength = None, pageOffset = None, objType = None, version = None):
        '''
        Returns a list of objects.
        '''
        # TODO: need to add an example here of what different query strings are supported
        request = libds3.lib.ds3_init_get_objects()
        response = POINTER(libds3.LibDs3GetObjectsResponse)()
        # Each filter is optional; only forward the ones supplied.
        if bucketName:
            libds3.lib.ds3_request_set_bucket_name(request, typeCheckString(bucketName))
        if creationDate:
            libds3.lib.ds3_request_set_creation_date(request, typeCheckString(creationDate))
        if objId:
            libds3.lib.ds3_request_set_id(request, typeCheckString(objId))
        if name:
            libds3.lib.ds3_request_set_name(request, typeCheckString(name))
        if pageLength:
            libds3.lib.ds3_request_set_page_length(request, typeCheckString(str(pageLength)))
        if pageOffset:
            libds3.lib.ds3_request_set_page_offset(request, typeCheckString(str(pageOffset)))
        if objType:
            libds3.lib.ds3_request_set_type(request, enumCheckDs3ObjectType(objType))
        if version:
            libds3.lib.ds3_request_set_version(request, typeCheckString(str(version)))
        error = libds3.lib.ds3_get_objects(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = arrayToList(response.contents.objects, response.contents.num_objects, wrapper = Ds3SearchObject)
        libds3.lib.ds3_free_objects_response(response)
        return result
    def allocateChunk(self, chunkId):
        '''
        *Deprecated* - Allocates a specific chunk to be allocated in cache so that the objects in that chunk can safely be put without a need
        to handle 307 redirects.
        '''
        request = libds3.lib.ds3_init_allocate_chunk(chunkId)
        response = POINTER(libds3.LibDs3AllocateChunkResponse)()
        error = libds3.lib.ds3_allocate_chunk(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = Ds3AllocateChunkResponse(response)
        libds3.lib.ds3_free_allocate_chunk_response(response)
        return result
    def getAvailableChunks(self, jobId):
        '''
        Returns a list of all chunks in a job that can currently be processed. It will return a subset of all chunks, and it
        will return that same set of chunks until all the data in one of the chunks returned has been either completely gotten,
        or been completely put.
        '''
        request = libds3.lib.ds3_init_get_available_chunks(jobId)
        response = POINTER(libds3.LibDs3GetAvailableChunksResponse)()
        error = libds3.lib.ds3_get_available_chunks(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = Ds3AvailableChunksResponse(response)
        libds3.lib.ds3_free_available_chunks_response(response)
        return result
    def _sendJobRequest(self, func, request):
        # Shared helper for the job endpoints that return a bulk response.
        response = POINTER(libds3.LibDs3BulkResponse)()
        error = func(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        bulkResponse = Ds3BulkPlan(response)
        libds3.lib.ds3_free_bulk_response(response)
        return bulkResponse
    def getJob(self, jobId):
        '''
        Returns information about a job, including all the chunks in the job, as well as the status of the job.
        '''
        request = libds3.lib.ds3_init_get_job(jobId)
        return self._sendJobRequest(libds3.lib.ds3_get_job, request)
    def getJobs(self):
        '''
        Returns a list of all jobs.
        '''
        request = libds3.lib.ds3_init_get_jobs()
        response = POINTER(libds3.LibDs3GetJobsResponse)()
        error = libds3.lib.ds3_get_jobs(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        result = []
        # range (not Python2-only xrange) for Python 3 compatibility.
        for index in range(0, response.contents.jobs_size):
            result.append(Ds3BulkPlan(response.contents.jobs[index]))
        libds3.lib.ds3_free_get_jobs_response(response)
        return result
    def putJob(self, jobId):
        '''
        Modifies a job to reset the timeout timer for the job.
        '''
        request = libds3.lib.ds3_init_put_job(jobId)
        return self._sendJobRequest(libds3.lib.ds3_put_job, request)
    def deleteJob(self, jobId):
        '''
        Cancels a currently in progress job.
        '''
        request = libds3.lib.ds3_init_delete_job(jobId)
        error = libds3.lib.ds3_delete_job(self._client, request)
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
    def getPhysicalPlacement(self, bucketName, fileNameList, fullDetails = False):
        '''
        Returns where in the Spectra S3 system each file in `fileNameList` is located.
        '''
        response = POINTER(libds3.LibDs3GetPhysicalPlacementResponse)()
        bulkObjs = libds3.toDs3BulkObjectList(fileNameList)
        bucketName = typeCheckString(bucketName)
        # Bug fix: the two branches were swapped - fullDetails=True must use
        # the full-details request initializer.
        if fullDetails:
            request = libds3.lib.ds3_init_get_physical_placement_full_details(bucketName, bulkObjs)
        else:
            request = libds3.lib.ds3_init_get_physical_placement(bucketName, bulkObjs)
        error = libds3.lib.ds3_get_physical_placement(self._client, request, byref(response))
        libds3.lib.ds3_free_request(request)
        if error:
            raise Ds3Error(error)
        placements = []
        if response:
            placements = arrayToList(response.contents.tapes, response.contents.num_tapes, lambda obj: obj.barcode.contents.value)
            libds3.lib.ds3_free_get_physical_placement_response(response)
        return placements
| apache-2.0 |
guru-digital/CouchPotatoServer | libs/git/config.py | 110 | 2432 | # Copyright (c) 2009, Rotem Yaari <vmalloc@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .exceptions import GitCommandFailedException
class GitConfiguration(object):
    """Thin wrapper around ``git config`` for a single repository."""
    def __init__(self, repo):
        super(GitConfiguration, self).__init__()
        self.repo = repo
    def setParameter(self, path, value, local = True):
        # Sets `path` to `value`, in the repo config by default or the
        # global config when local=False.
        # NOTE(review): path/value are interpolated into a command string;
        # values containing double quotes would break the command or inject
        # extra arguments - confirm callers never pass untrusted input.
        self.repo._executeGitCommandAssertSuccess("config %s \"%s\" \"%s\"" % ("" if local else "--global", path, value))
    def unsetParameter(self, path, local = True):
        try:
            self.repo._executeGitCommandAssertSuccess("config --unset %s \"%s\"" % ("" if local else "--global", path))
        except GitCommandFailedException:
            # `git config --unset` fails when the key does not exist; that
            # is acceptable - only re-raise if the key is still present.
            if self.getParameter(path) is not None:
                raise
    def getParameter(self, path):
        # Returns the value for `path`, or None when it is not set.
        return self.getDict().get(path, None)
    def getDict(self):
        # Parses `git config -l` output, one "key=value" pair per line.
        # NOTE(review): values containing newlines will break this parsing.
        return dict(line.strip().split("=", 1)
                    for line in self.repo._getOutputAssertSuccess("config -l").splitlines())
| gpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/_threading_local.py | 241 | 7456 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
    """Base class carrying the per-instance bookkeeping for `local`.

    Slots prevent a class-level instance __dict__; the real attribute
    dict lives on each Thread object, keyed by `_local__key`.
    """
    __slots__ = '_local__key', '_local__args', '_local__lock'

    def __new__(cls, *args, **kw):
        self = object.__new__(cls)
        # NOTE(review): `key` is deliberately a 2-tuple
        # ('_local__key', 'thread.local.<id>') used as the lookup key in
        # each Thread's __dict__; the id() makes it unique per instance.
        key = '_local__key', 'thread.local.' + str(id(self))
        object.__setattr__(self, '_local__key', key)
        object.__setattr__(self, '_local__args', (args, kw))
        object.__setattr__(self, '_local__lock', RLock())

        # Constructor arguments only make sense if a subclass defines
        # __init__ to consume them.
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")

        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        dict = object.__getattribute__(self, '__dict__')
        current_thread().__dict__[key] = dict

        return self
def _patch(self):
    """Swap in the calling thread's private __dict__ for *self*.

    Called (under the instance lock) before every attribute access so
    that each thread sees only its own attribute values.
    """
    key = object.__getattribute__(self, '_local__key')
    d = current_thread().__dict__.get(key)
    if d is None:
        # First access from this thread: create its dict, then run the
        # subclass __init__ (if any) with the originally saved args.
        d = {}
        current_thread().__dict__[key] = d
        object.__setattr__(self, '__dict__', d)

        # we have a new instance dict, so call out __init__ if we have
        # one
        cls = type(self)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(self, '_local__args')
            cls.__init__(self, *args, **kw)
    else:
        object.__setattr__(self, '__dict__', d)
class local(_localbase):
    """A class whose attribute values are local to the calling thread.

    Every attribute access first patches in the current thread's dict
    (via `_patch`), serialized by the per-instance re-entrant lock.
    """

    def __getattribute__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__getattribute__(self, name)
        finally:
            lock.release()

    def __setattr__(self, name, value):
        # Replacing __dict__ wholesale would break the per-thread
        # bookkeeping, so it is explicitly rejected.
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__setattr__(self, name, value)
        finally:
            lock.release()

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__delattr__(self, name)
        finally:
            lock.release()

    def __del__(self):
        # Remove this instance's per-thread dicts from every live thread
        # so the Thread objects do not keep stale data alive.
        import threading

        key = object.__getattribute__(self, '_local__key')

        try:
            # We use the non-locking API since we might already hold the lock
            # (__del__ can be called at any point by the cyclic GC).
            threads = threading._enumerate()
        except:
            # If enumerating the current threads fails, as it seems to do
            # during shutdown, we'll skip cleanup under the assumption
            # that there is nothing to clean up.
            return

        for thread in threads:
            try:
                __dict__ = thread.__dict__
            except AttributeError:
                # Thread is dying, rest in peace.
                continue

            if key in __dict__:
                try:
                    del __dict__[key]
                except KeyError:
                    pass # didn't have anything in this thread
from threading import current_thread, RLock
| gpl-2.0 |
gauravbose/digital-menu | digimenu2/build/lib.linux-x86_64-2.7/django/contrib/sessions/models.py | 82 | 2229 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class SessionManager(models.Manager):
    """Manager adding encode/save helpers for Session rows."""
    use_in_migrations = True

    def encode(self, session_dict):
        """
        Returns the given session dictionary serialized and encoded as a string.
        """
        store = SessionStore()
        return store.encode(session_dict)

    def save(self, session_key, session_dict, expire_date):
        """Persist the session, or purge it when the dict is empty."""
        encoded = self.encode(session_dict)
        session = self.model(session_key, encoded, expire_date)
        if not session_dict:
            session.delete()  # Clear sessions with no data.
        else:
            session.save()
        return session
@python_2_unicode_compatible
class Session(models.Model):
    """
    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    The Django sessions framework is entirely cookie-based. It does
    not fall back to putting session IDs in URLs. This is an intentional
    design decision. Not only does that behavior make URLs ugly, it makes
    your site vulnerable to session-ID theft via the "Referer" header.

    For complete documentation on using Sessions in your code, consult
    the sessions documentation that is shipped with Django (also available
    on the Django Web site).
    """
    # Random key sent to the client in the session cookie; primary key.
    session_key = models.CharField(_('session key'), max_length=40,
                                   primary_key=True)
    # Serialized + encoded session payload (see SessionStore.encode).
    session_data = models.TextField(_('session data'))
    # Indexed so expired-session cleanup can query it efficiently.
    expire_date = models.DateTimeField(_('expire date'), db_index=True)
    objects = SessionManager()

    class Meta:
        db_table = 'django_session'
        verbose_name = _('session')
        verbose_name_plural = _('sessions')

    def __str__(self):
        return self.session_key

    def get_decoded(self):
        # Decode session_data back into a plain dict.
        return SessionStore().decode(self.session_data)
# At bottom to avoid circular import
from django.contrib.sessions.backends.db import SessionStore # isort:skip
| bsd-3-clause |
jimi-c/ansible | test/units/playbook/test_helpers.py | 119 | 19184 | # (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
from units.mock.loader import DictDataLoader
from ansible import errors
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role.include import RoleInclude
from ansible.playbook import helpers
class MixinForMocks(object):
    """Shared fixture builder for the helper tests below.

    `_setup` populates self with mock TQM/play/iterator/inventory/
    variable-manager objects plus several DictDataLoader fixtures whose
    embedded YAML is consumed by helpers under test.
    """

    def _setup(self):
        # This is not a very good mixin, lots of side effects
        self.fake_loader = DictDataLoader({'include_test.yml': "",
                                           'other_include_test.yml': ""})
        self.mock_tqm = MagicMock(name='MockTaskQueueManager')

        self.mock_play = MagicMock(name='MockPlay')

        self.mock_iterator = MagicMock(name='MockIterator')
        self.mock_iterator._play = self.mock_play

        self.mock_inventory = MagicMock(name='MockInventory')
        self.mock_inventory._hosts_cache = dict()

        # get_host always misses; individual tests don't need real hosts.
        def _get_host(host_name):
            return None
        self.mock_inventory.get_host.side_effect = _get_host

        # TODO: can we use a real VariableManager?
        self.mock_variable_manager = MagicMock(name='MockVariableManager')
        self.mock_variable_manager.get_vars.return_value = dict()

        self.mock_block = MagicMock(name='MockBlock')

        # Loader serving a minimal role tasks file.
        self.fake_role_loader = DictDataLoader({"/etc/ansible/roles/bogus_role/tasks/main.yml": """
                                               - shell: echo 'hello world'
                                               """})

        self._test_data_path = os.path.dirname(__file__)
        # Loader serving nested include files used by the include tests.
        self.fake_include_loader = DictDataLoader({"/dev/null/includes/test_include.yml": """
                                                  - include: other_test_include.yml
                                                  - shell: echo 'hello world'
                                                  """,
                                                  "/dev/null/includes/static_test_include.yml": """
                                                  - include: other_test_include.yml
                                                  - shell: echo 'hello static world'
                                                  """,
                                                  "/dev/null/includes/other_test_include.yml": """
                                                  - debug:
                                                      msg: other_test_include_debug
                                                  """})
class TestLoadListOfTasks(unittest.TestCase, MixinForMocks):
    """Tests for helpers.load_list_of_tasks(): bad input, unknown actions,
    blocks, includes (static/dynamic, tags, handlers) and include_role."""

    def setUp(self):
        self._setup()

    def _assert_is_task_list(self, results):
        for result in results:
            self.assertIsInstance(result, Task)

    def _assert_is_task_list_or_blocks(self, results):
        # Loaded task lists may mix bare Tasks and Blocks.
        self.assertIsInstance(results, list)
        for result in results:
            self.assertIsInstance(result, (Task, Block))

    def test_ds_not_list(self):
        # A non-list datastructure is rejected outright.
        ds = {}
        self.assertRaises(AssertionError, helpers.load_list_of_tasks,
                          ds, self.mock_play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)

    def test_empty_task(self):
        ds = [{}]
        self.assertRaisesRegexp(errors.AnsibleParserError,
                                "no action detected in task. This often indicates a misspelled module name, or incorrect module path",
                                helpers.load_list_of_tasks,
                                ds, play=self.mock_play,
                                variable_manager=self.mock_variable_manager, loader=self.fake_loader)

    def test_empty_task_use_handlers(self):
        ds = [{}]
        self.assertRaisesRegexp(errors.AnsibleParserError,
                                "no action detected in task. This often indicates a misspelled module name, or incorrect module path",
                                helpers.load_list_of_tasks,
                                ds,
                                use_handlers=True,
                                play=self.mock_play,
                                variable_manager=self.mock_variable_manager,
                                loader=self.fake_loader)

    def test_one_bogus_block(self):
        ds = [{'block': None}]
        self.assertRaisesRegexp(errors.AnsibleParserError,
                                "A malformed block was encountered",
                                helpers.load_list_of_tasks,
                                ds, play=self.mock_play,
                                variable_manager=self.mock_variable_manager, loader=self.fake_loader)

    def test_unknown_action(self):
        # Unknown module names are not rejected at load time; the task is
        # created and carries the action name through.
        action_name = 'foo_test_unknown_action'
        ds = [{'action': action_name}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertEquals(res[0].action, action_name)

    def test_block_unknown_action(self):
        action_name = 'foo_test_block_unknown_action'
        ds = [{
            'block': [{'action': action_name}]
        }]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self._assert_default_block(res[0])

    def _assert_default_block(self, block):
        # the expected defaults
        self.assertIsInstance(block.block, list)
        self.assertEquals(len(block.block), 1)
        self.assertIsInstance(block.rescue, list)
        self.assertEquals(len(block.rescue), 0)
        self.assertIsInstance(block.always, list)
        self.assertEquals(len(block.always), 0)

    def test_block_unknown_action_use_handlers(self):
        ds = [{
            'block': [{'action': 'foo_test_block_unknown_action'}]
        }]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self._assert_default_block(res[0])

    def test_one_bogus_block_use_handlers(self):
        ds = [{'block': True}]
        self.assertRaisesRegexp(errors.AnsibleParserError,
                                "A malformed block was encountered",
                                helpers.load_list_of_tasks,
                                ds, play=self.mock_play, use_handlers=True,
                                variable_manager=self.mock_variable_manager, loader=self.fake_loader)

    def test_one_bogus_include(self):
        # Includes pointing at files the loader cannot find yield an
        # empty task list rather than an error.
        ds = [{'include': 'somefile.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self.assertIsInstance(res, list)
        self.assertEquals(len(res), 0)

    def test_one_bogus_include_use_handlers(self):
        ds = [{'include': 'somefile.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self.assertIsInstance(res, list)
        self.assertEquals(len(res), 0)

    def test_one_bogus_include_static(self):
        ds = [{'include': 'somefile.yml',
               'static': 'true'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_loader)
        self.assertIsInstance(res, list)
        self.assertEquals(len(res), 0)

    def test_one_include(self):
        ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self.assertEquals(len(res), 1)
        self._assert_is_task_list_or_blocks(res)

    def test_one_parent_include(self):
        # test_include.yml itself contains an include; the result's
        # parent should be a TaskInclude.
        ds = [{'include': '/dev/null/includes/test_include.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self.assertIsInstance(res[0]._parent, TaskInclude)

    # TODO/FIXME: do this non deprecated way
    def test_one_include_tags(self):
        ds = [{'include': '/dev/null/includes/other_test_include.yml',
               'tags': ['test_one_include_tags_tag1', 'and_another_tagB']
               }]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self.assertIn('test_one_include_tags_tag1', res[0].tags)
        self.assertIn('and_another_tagB', res[0].tags)

    # TODO/FIXME: do this non deprecated way
    def test_one_parent_include_tags(self):
        ds = [{'include': '/dev/null/includes/test_include.yml',
               # 'vars': {'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']}
               'tags': ['test_one_parent_include_tags_tag1', 'and_another_tag2']
               }
              ]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self.assertIn('test_one_parent_include_tags_tag1', res[0].tags)
        self.assertIn('and_another_tag2', res[0].tags)

    # It would be useful to be able to tell what kind of deprecation we encountered and where we encountered it.
    def test_one_include_tags_deprecated_mixed(self):
        # Mixing vars-style tags with task-level tags must fail loudly.
        ds = [{'include': "/dev/null/includes/other_test_include.yml",
               'vars': {'tags': "['tag_on_include1', 'tag_on_include2']"},
               'tags': 'mixed_tag1, mixed_tag2'
               }]
        self.assertRaisesRegexp(errors.AnsibleParserError, 'Mixing styles',
                                helpers.load_list_of_tasks,
                                ds, play=self.mock_play,
                                variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)

    def test_one_include_tags_deprecated_include(self):
        ds = [{'include': '/dev/null/includes/other_test_include.yml',
               'vars': {'tags': ['include_tag1_deprecated', 'and_another_tagB_deprecated']}
               }]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Block)
        self.assertIn('include_tag1_deprecated', res[0].tags)
        self.assertIn('and_another_tagB_deprecated', res[0].tags)

    def test_one_include_use_handlers(self):
        ds = [{'include': '/dev/null/includes/other_test_include.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         use_handlers=True,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Handler)

    def test_one_parent_include_use_handlers(self):
        ds = [{'include': '/dev/null/includes/test_include.yml'}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         use_handlers=True,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Handler)

        # default for Handler
        self.assertEquals(res[0].listen, None)

    # TODO/FIXME: this doesn't seen right
    #             figure out how to get the non-static errors to be raised, this seems to just ignore everything
    def test_one_include_not_static(self):
        ds = [{
            'include': '/dev/null/includes/static_test_include.yml',
            'static': False
        }]
        # a_block = Block()
        ti_ds = {'include': '/dev/null/includes/ssdftatic_test_include.yml'}
        a_task_include = TaskInclude()
        ti = a_task_include.load(ti_ds)
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         block=ti,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
        self._assert_is_task_list_or_blocks(res)
        self.assertIsInstance(res[0], Task)
        self.assertEquals(res[0].args['_raw_params'], '/dev/null/includes/static_test_include.yml')

    # TODO/FIXME: This two get stuck trying to make a mock_block into a TaskInclude
#    def test_one_include(self):
#        ds = [{'include': 'other_test_include.yml'}]
#        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
#                                         block=self.mock_block,
#                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
#        print(res)

#    def test_one_parent_include(self):
#        ds = [{'include': 'test_include.yml'}]
#        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
#                                         block=self.mock_block,
#                                         variable_manager=self.mock_variable_manager, loader=self.fake_include_loader)
#        print(res)

    def test_one_bogus_include_role(self):
        ds = [{'include_role': {'name': 'bogus_role'}}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play,
                                         block=self.mock_block,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
        self.assertEquals(len(res), 1)
        self._assert_is_task_list_or_blocks(res)

    def test_one_bogus_include_role_use_handlers(self):
        ds = [{'include_role': {'name': 'bogus_role'}}]
        res = helpers.load_list_of_tasks(ds, play=self.mock_play, use_handlers=True,
                                         block=self.mock_block,
                                         variable_manager=self.mock_variable_manager,
                                         loader=self.fake_role_loader)
        self.assertEquals(len(res), 1)
        self._assert_is_task_list_or_blocks(res)
class TestLoadListOfRoles(unittest.TestCase, MixinForMocks):
    """Tests for helpers.load_list_of_roles()."""

    def setUp(self):
        self._setup()

    def test_ds_not_list(self):
        # A non-list datastructure is rejected outright.
        ds = {}
        self.assertRaises(AssertionError, helpers.load_list_of_roles,
                          ds, self.mock_play)

    def test_empty_role(self):
        ds = [{}]
        self.assertRaisesRegexp(errors.AnsibleError,
                                "role definitions must contain a role name",
                                helpers.load_list_of_roles,
                                ds, self.mock_play,
                                variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)

    def test_empty_role_just_name(self):
        ds = [{'name': 'bogus_role'}]
        res = helpers.load_list_of_roles(ds, self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
        self.assertIsInstance(res, list)
        for r in res:
            self.assertIsInstance(r, RoleInclude)

    def test_block_unknown_action(self):
        # NOTE(review): the original test built a 'block'-style ds and then
        # immediately overwrote it with a plain role ds, so the block data
        # was never exercised and the test duplicated
        # test_empty_role_just_name. The dead assignment has been removed;
        # TODO: add real coverage for block-style role definitions.
        ds = [{'name': 'bogus_role'}]
        res = helpers.load_list_of_roles(ds, self.mock_play,
                                         variable_manager=self.mock_variable_manager, loader=self.fake_role_loader)
        self.assertIsInstance(res, list)
        for r in res:
            self.assertIsInstance(r, RoleInclude)
class TestLoadListOfBlocks(unittest.TestCase, MixinForMocks):
    """Tests for helpers.load_list_of_blocks()."""

    def setUp(self):
        self._setup()

    def test_ds_not_list(self):
        # A non-list datastructure is rejected outright.
        ds = {}
        mock_play = MagicMock(name='MockPlay')
        self.assertRaises(AssertionError, helpers.load_list_of_blocks,
                          ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None)

    def test_empty_block(self):
        ds = [{}]
        mock_play = MagicMock(name='MockPlay')
        self.assertRaisesRegexp(errors.AnsibleParserError,
                                "no action detected in task. This often indicates a misspelled module name, or incorrect module path",
                                helpers.load_list_of_blocks,
                                ds, mock_play,
                                parent_block=None,
                                role=None,
                                task_include=None,
                                use_handlers=False,
                                variable_manager=None,
                                loader=None)

    def test_block_unknown_action(self):
        # Unknown actions are wrapped in Block objects, not rejected.
        ds = [{'action': 'foo'}]
        mock_play = MagicMock(name='MockPlay')
        res = helpers.load_list_of_blocks(ds, mock_play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None,
                                          loader=None)

        self.assertIsInstance(res, list)
        for block in res:
            self.assertIsInstance(block, Block)
| gpl-3.0 |
mdietrichc2c/carrier-delivery | __unported__/delivery_carrier_label_dispatch/wizard/generate_labels.py | 2 | 3670 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..pdf_utils import assemble_pdf
from openerp.osv import orm, fields
from tools.translate import _
class DeliveryCarrierLabelGenerate(orm.TransientModel):
    # Transient wizard: collects picking dispatches (from the action
    # context), generates missing carrier labels and merges every
    # label PDF into a single downloadable file.
    _name = 'delivery.carrier.label.generate'

    def _get_dispatch_ids(self, cr, uid, context=None):
        # Default dispatch_ids from the active records the wizard was
        # launched on (only when launched from picking.dispatch).
        if context is None:
            context = {}
        res = False
        if (context.get('active_model') == 'picking.dispatch'
                and context.get('active_ids')):
            res = context['active_ids']
        return res

    _columns = {
        'dispatch_ids': fields.many2many('picking.dispatch',
                                         string='Picking Dispatch'),
        'label_pdf_file': fields.binary('Labels file'),
    }

    _defaults = {
        'dispatch_ids': _get_dispatch_ids,
    }

    def action_generate_labels(self, cr, uid, ids, context=None):
        """
        Call the creation of the delivery carrier label
        of the missing labels and get the existing ones
        Then merge all of them in a single PDF

        Returns an act_window re-opening this wizard so the user can
        download the merged file from label_pdf_file.
        """
        this = self.browse(cr, uid, ids, context=context)[0]
        if not this.dispatch_ids:
            raise orm.except_orm(_('Error'), _('No picking dispatch selected'))

        picking_out_obj = self.pool.get('stock.picking.out')

        # flatten all picking in one list to keep the order in case
        # there are multiple dispatch or if pickings
        # have been ordered to ease packaging
        pickings = [(pick, pick.get_pdf_label()[pick.id])
                    for dispatch in this.dispatch_ids
                    for pick in dispatch.related_picking_ids]

        # get picking ids for which we want to generate pdf label
        picking_ids = [pick.id for pick, pdf in pickings
                       if not pdf]

        # generate missing picking labels
        picking_out_obj.action_generate_carrier_label(cr, uid,
                                                      picking_ids,
                                                      #file_type='pdf',
                                                      context=context)
        # Get all pdf files adding the newly generated ones
        data_list = [pdf or pick.get_pdf_label()[pick.id]
                     for pick, pdf in pickings]
        # NOTE(review): labels appear to be stored base64-encoded; decode
        # before merging, re-encode the merged result — confirm with
        # get_pdf_label's contract.
        pdf_list = [data.decode('base64') for data in data_list if data]
        pdf_file = assemble_pdf(pdf_list)
        this.write({'label_pdf_file': pdf_file.encode('base64')})
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'delivery.carrier.label.generate',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
| agpl-3.0 |
yasoob/PythonRSSReader | venv/lib/python2.7/site-packages/pip/commands/list.py | 269 | 7251 | from __future__ import absolute_import
import logging
from pip._vendor import pkg_resources
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.wheel import WheelCache
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.

    Packages are listed in a case-insensitive sorted order.
    """
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'

    def __init__(self, *args, **kw):
        super(ListCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help=('If in a virtualenv that has global access, do not list '
                  'globally-installed packages.'),
        )
        self.cmd_opts.add_option(
            '--user',
            dest='user',
            action='store_true',
            default=False,
            help='Only output packages installed in user-site.')

        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help=("Include pre-release and development versions. By default, "
                  "pip only finds stable versions."),
        )

        # Index options (--index-url, --find-links, ...) are shared with
        # other commands and inserted before the list-specific group.
        index_opts = make_option_group(index_group, self.parser)

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            allow_external=options.allow_external,
            allow_unverified=options.allow_unverified,
            allow_all_external=options.allow_all_external,
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )

    def run(self, options, args):
        # Dispatch on the mutually-exclusive listing modes; plain
        # listing is the default.
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)

    def run_outdated(self, options):
        # Print each installed dist whose index version is newer.
        for dist, version, typ in self.find_packages_latest_versions(options):
            if version > dist.parsed_version:
                logger.info(
                    '%s (Current: %s Latest: %s [%s])',
                    dist.project_name, dist.version, version, typ,
                )

    def find_packages_latest_versions(self, options):
        """Yield (dist, latest_version, type) for installed non-editable
        packages that have a newer or equal candidate on the index."""
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []

        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local,
                                                user_only=options.user):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)

            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                include_editables=False,
            )
            format_control = FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            for dist in installed_packages:
                req = InstallRequirement.from_line(
                    dist.key, None, isolated=options.isolated_mode,
                    wheel_cache=wheel_cache
                )
                typ = 'unknown'
                try:
                    link = finder.find_requirement(req, True)

                    # If link is None, means installed version is most
                    # up-to-date
                    if link is None:
                        continue
                except DistributionNotFound:
                    continue
                else:
                    canonical_name = pkg_resources.safe_name(req.name).lower()
                    formats = fmt_ctl_formats(format_control, canonical_name)
                    search = Search(
                        req.name,
                        canonical_name,
                        formats)
                    remote_version = finder._link_package_versions(
                        link, search).version
                    if link.is_wheel:
                        typ = 'wheel'
                    else:
                        typ = 'sdist'
                yield dist, remote_version, typ

    def run_listing(self, options):
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
        )
        self.output_package_listing(installed_packages)

    def run_editables(self, options):
        installed_packages = get_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            editables_only=True,
        )
        self.output_package_listing(installed_packages)

    def output_package_listing(self, installed_packages):
        # Case-insensitive sort; editables also show their location.
        installed_packages = sorted(
            installed_packages,
            key=lambda dist: dist.project_name.lower(),
        )
        for dist in installed_packages:
            if dist_is_editable(dist):
                line = '%s (%s, %s)' % (
                    dist.project_name,
                    dist.version,
                    dist.location,
                )
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.info(line)

    def run_uptodate(self, options):
        uptodate = []
        for dist, version, typ in self.find_packages_latest_versions(options):
            if dist.parsed_version == version:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| mit |
pixelgremlins/ztruck | dj/lib/python2.7/site-packages/setuptools/command/alias.py | 467 | 2381 | from distutils.errors import DistutilsOptionError
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    has_special = any(ch in arg for ch in ('"', "'", "\\", "#"))
    if has_special or arg.split() != [arg]:
        # Special characters or whitespace: fall back to Python repr,
        # which shlex.split() can re-parse.
        return repr(arg)
    return arg
class alias(option_base):
    """Distutils command defining a shortcut that expands to other commands."""

    description = "define a shortcut to invoke one or more commands"
    command_consumes_arguments = True

    user_options = [
        ('remove', 'r', 'remove (unset) the alias'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.args = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        # Removal takes exactly one argument: the alias name itself.
        if self.remove and len(self.args) != 1:
            raise DistutilsOptionError(
                "Must specify exactly one argument (the alias name) when "
                "using --remove"
            )

    def run(self):
        aliases = self.distribution.get_option_dict('aliases')

        if not self.args:
            # No arguments at all: just show every configured alias.
            print("Command Aliases")
            print("---------------")
            for name in aliases:
                print("setup.py alias", format_alias(name, aliases))
            return

        if len(self.args) != 1:
            # Name plus a command: (re)define the alias.
            alias = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))
        else:
            # Single argument: either remove it or display its definition.
            alias, = self.args
            if self.remove:
                command = None
            elif alias in aliases:
                print("setup.py alias", format_alias(alias, aliases))
                return
            else:
                print("No alias definition found for %r" % alias)
                return

        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
    """Render one alias as the command line that would recreate it."""
    source, command = aliases[name]
    # Check config locations in the same order as the original elif chain
    # so ties resolve identically; fall back to an explicit filename.
    for kind, flag in (('global', '--global-config '),
                       ('user', '--user-config '),
                       ('local', '')):
        if source == config_file(kind):
            prefix = flag
            break
    else:
        prefix = '--filename=%r' % source
    return prefix + name + ' ' + command
| apache-2.0 |
njmube/erpnext | erpnext/accounts/report/accounts_receivable/accounts_receivable.py | 10 | 11261 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, scrub
from frappe.utils import getdate, nowdate, flt, cint
class ReceivablePayableReport(object):
    """Builds the Accounts Receivable / Accounts Payable script report.

    Reads GL Entries through the frappe ORM, keeps the ones that are still
    outstanding as of ``filters.report_date``, and produces (columns, data,
    message, chart) in the shape frappe's script-report runner expects.
    The same class serves both Customers (receivable) and Suppliers
    (payable); ``args["party_type"]`` selects which.
    """

    def __init__(self, filters=None):
        self.filters = frappe._dict(filters or {})
        # Default the report date to today when the caller omits it.
        self.filters.report_date = getdate(self.filters.report_date or nowdate())
        # Ageing is never computed against a future date: cap at today.
        self.age_as_on = getdate(nowdate()) \
            if self.filters.report_date > getdate(nowdate()) \
            else self.filters.report_date

    def run(self, args):
        """Entry point: return (columns, data, message, chart) for the report."""
        # e.g. args["naming_by"] == ["Selling Settings", "cust_master_name"]
        party_naming_by = frappe.db.get_value(args.get("naming_by")[0], None, args.get("naming_by")[1])
        columns = self.get_columns(party_naming_by, args)
        data = self.get_data(party_naming_by, args)
        chart = self.get_chart_data(columns, data)
        return columns, data, None, chart

    def get_columns(self, party_naming_by, args):
        """Build the report's column definitions (mix of legacy ":"-encoded
        strings and dict-style definitions)."""
        columns = [_("Posting Date") + ":Date:80", _(args.get("party_type")) + ":Link/" + args.get("party_type") + ":200"]

        # Extra name column only when parties are named by series (id != name).
        if party_naming_by == "Naming Series":
            columns += [args.get("party_type") + " Name::110"]

        columns += [_("Voucher Type") + "::110", _("Voucher No") + ":Dynamic Link/"+_("Voucher Type")+":120",
            _("Due Date") + ":Date:80"]

        if args.get("party_type") == "Supplier":
            columns += [_("Bill No") + "::80", _("Bill Date") + ":Date:80"]

        for label in ("Invoiced Amount", "Paid Amount", "Outstanding Amount"):
            columns.append({
                "label": label,
                "fieldtype": "Currency",
                "options": "currency",
                "width": 120
            })

        columns += [_("Age (Days)") + ":Int:80"]

        # Remember where the four ageing-bucket columns begin; used later by
        # get_chart_data to slice them back out.
        self.ageing_col_idx_start = len(columns)

        # Default the three ageing cut-offs (in days) when not supplied.
        if not "range1" in self.filters:
            self.filters["range1"] = "30"
        if not "range2" in self.filters:
            self.filters["range2"] = "60"
        if not "range3" in self.filters:
            self.filters["range3"] = "90"

        # Bucket labels: "0-30", "31-60", "61-90", "91-Above" (by default).
        for label in ("0-{range1}".format(range1=self.filters["range1"]),
            "{range1}-{range2}".format(range1=cint(self.filters["range1"])+ 1, range2=self.filters["range2"]),
            "{range2}-{range3}".format(range2=cint(self.filters["range2"])+ 1, range3=self.filters["range3"]),
            "{range3}-{above}".format(range3=cint(self.filters["range3"])+ 1, above=_("Above"))):
                columns.append({
                    "label": label,
                    "fieldtype": "Currency",
                    "options": "currency",
                    "width": 120
                })

        columns.append({
            "fieldname": "currency",
            "label": _("Currency"),
            "fieldtype": "Link",
            "options": "Currency",
            "width": 100
        })
        if args.get("party_type") == "Customer":
            columns += [_("Territory") + ":Link/Territory:80"]
        if args.get("party_type") == "Supplier":
            columns += [_("Supplier Type") + ":Link/Supplier Type:80"]

        columns.append(_("Remarks") + "::200")

        return columns

    def get_data(self, party_naming_by, args):
        """Build one report row per GL entry that is still outstanding as of
        the report date."""
        from erpnext.accounts.utils import get_currency_precision
        currency_precision = get_currency_precision() or 2
        # Receivables live on the debit side, payables on the credit side.
        dr_or_cr = "debit" if args.get("party_type") == "Customer" else "credit"

        voucher_details = self.get_voucher_details(args.get("party_type"))

        future_vouchers = self.get_entries_after(self.filters.report_date, args.get("party_type"))

        if not self.filters.get("company"):
            self.filters["company"] = frappe.db.get_single_value('Global Defaults', 'default_company')

        company_currency = frappe.db.get_value("Company", self.filters.get("company"), "default_currency")

        data = []
        for gle in self.get_entries_till(self.filters.report_date, args.get("party_type")):
            if self.is_receivable_or_payable(gle, dr_or_cr, future_vouchers):
                outstanding_amount = flt(self.get_outstanding_amount(gle,
                    self.filters.report_date, dr_or_cr), currency_precision)
                # Skip amounts that round to zero at the currency precision.
                if abs(outstanding_amount) > 0.1/10**currency_precision:
                    row = [gle.posting_date, gle.party]

                    # customer / supplier name
                    if party_naming_by == "Naming Series":
                        row += [self.get_party_name(gle.party_type, gle.party)]

                    # get due date
                    due_date = voucher_details.get(gle.voucher_no, {}).get("due_date", "")

                    row += [gle.voucher_type, gle.voucher_no, due_date]

                    # get supplier bill details
                    if args.get("party_type") == "Supplier":
                        row += [
                            voucher_details.get(gle.voucher_no, {}).get("bill_no", ""),
                            voucher_details.get(gle.voucher_no, {}).get("bill_date", "")
                        ]

                    # invoiced and paid amounts
                    invoiced_amount = gle.get(dr_or_cr) if (gle.get(dr_or_cr) > 0) else 0
                    paid_amt = invoiced_amount - outstanding_amount
                    row += [invoiced_amount, paid_amt, outstanding_amount]

                    # ageing data
                    entry_date = due_date if self.filters.ageing_based_on == "Due Date" else gle.posting_date
                    row += get_ageing_data(cint(self.filters.range1), cint(self.filters.range2),
                        cint(self.filters.range3), self.age_as_on, entry_date, outstanding_amount)

                    # issue 6371-Ageing buckets should not have amounts if due date is not reached
                    if self.filters.ageing_based_on == "Due Date" and getdate(due_date) > getdate(self.filters.report_date):
                        row[-1] = row[-2] = row[-3] = row[-4] = 0

                    # Single-party filter shows amounts in the party's account
                    # currency; otherwise fall back to the company currency.
                    if self.filters.get(scrub(args.get("party_type"))):
                        row.append(gle.account_currency)
                    else:
                        row.append(company_currency)

                    # customer territory / supplier type
                    if args.get("party_type") == "Customer":
                        row += [self.get_territory(gle.party)]
                    if args.get("party_type") == "Supplier":
                        row += [self.get_supplier_type(gle.party)]

                    row.append(gle.remarks)
                    data.append(row)

        return data

    def get_entries_after(self, report_date, party_type):
        # returns a distinct list of (voucher_type, voucher_no) posted after
        # the report date
        return list(set([(e.voucher_type, e.voucher_no) for e in self.get_gl_entries(party_type)
            if getdate(e.posting_date) > report_date]))

    def get_entries_till(self, report_date, party_type):
        # returns a generator over entries posted on or before the report date
        return (e for e in self.get_gl_entries(party_type)
            if getdate(e.posting_date) <= report_date)

    def is_receivable_or_payable(self, gle, dr_or_cr, future_vouchers):
        """True when this GL entry represents an open receivable/payable item."""
        return (
            # advance
            (not gle.against_voucher) or

            # against sales order/purchase order
            (gle.against_voucher_type in ["Sales Order", "Purchase Order"]) or

            # sales invoice/purchase invoice
            (gle.against_voucher==gle.voucher_no and gle.get(dr_or_cr) > 0) or

            # entries adjusted with future vouchers
            ((gle.against_voucher_type, gle.against_voucher) in future_vouchers)
        )

    def get_outstanding_amount(self, gle, report_date, dr_or_cr):
        """Invoice amount minus all payments booked against the same voucher
        up to the report date (excluding the entry itself)."""
        payment_amount = 0.0
        for e in self.get_gl_entries_for(gle.party, gle.party_type, gle.voucher_type, gle.voucher_no):
            if getdate(e.posting_date) <= report_date and e.name!=gle.name:
                payment_amount += (flt(e.credit if gle.party_type == "Customer" else e.debit) - flt(e.get(dr_or_cr)))

        return flt(gle.get(dr_or_cr)) - flt(gle.credit if gle.party_type == "Customer" else gle.debit) - payment_amount

    def get_party_name(self, party_type, party_name):
        return self.get_party_map(party_type).get(party_name, {}).get("customer_name" if party_type == "Customer" else "supplier_name") or ""

    def get_territory(self, party_name):
        return self.get_party_map("Customer").get(party_name, {}).get("territory") or ""

    def get_supplier_type(self, party_name):
        return self.get_party_map("Supplier").get(party_name, {}).get("supplier_type") or ""

    def get_party_map(self, party_type):
        # Lazily cached map of party name -> party row (name/display/grouping).
        if not hasattr(self, "party_map"):
            if party_type == "Customer":
                self.party_map = dict(((r.name, r) for r in frappe.db.sql("""select {0}, {1}, {2} from `tab{3}`"""
                    .format("name", "customer_name", "territory", party_type), as_dict=True)))
            elif party_type == "Supplier":
                self.party_map = dict(((r.name, r) for r in frappe.db.sql("""select {0}, {1}, {2} from `tab{3}`"""
                    .format("name", "supplier_name", "supplier_type", party_type), as_dict=True)))

        return self.party_map

    def get_voucher_details(self, party_type):
        """Map voucher name -> due date (plus bill no/date for suppliers),
        taken from submitted invoices only (docstatus=1)."""
        voucher_details = frappe._dict()

        if party_type == "Customer":
            for si in frappe.db.sql("""select name, due_date
                from `tabSales Invoice` where docstatus=1""", as_dict=1):
                    voucher_details.setdefault(si.name, si)

        if party_type == "Supplier":
            for pi in frappe.db.sql("""select name, due_date, bill_no, bill_date
                from `tabPurchase Invoice` where docstatus=1""", as_dict=1):
                    voucher_details.setdefault(pi.name, pi)

        return voucher_details

    def get_gl_entries(self, party_type):
        # Lazily cached: one aggregated row per (voucher, against-voucher,
        # party) tuple. Amounts are taken in account currency when filtering
        # by a single party, otherwise in company currency.
        if not hasattr(self, "gl_entries"):
            conditions, values = self.prepare_conditions(party_type)

            if self.filters.get(scrub(party_type)):
                select_fields = "sum(debit_in_account_currency) as debit, sum(credit_in_account_currency) as credit"
            else:
                select_fields = "sum(debit) as debit, sum(credit) as credit"

            self.gl_entries = frappe.db.sql("""select name, posting_date, account, party_type, party,
                voucher_type, voucher_no, against_voucher_type, against_voucher,
                account_currency, remarks, {0}
                from `tabGL Entry`
                where docstatus < 2 and party_type=%s and (party is not null and party != '') {1}
                group by voucher_type, voucher_no, against_voucher_type, against_voucher, party
                order by posting_date, party"""
                .format(select_fields, conditions), values, as_dict=True)

        return self.gl_entries

    def prepare_conditions(self, party_type):
        """Build the extra SQL WHERE fragment + parameter list from filters."""
        conditions = [""]
        values = [party_type]

        party_type_field = scrub(party_type)

        if self.filters.company:
            conditions.append("company=%s")
            values.append(self.filters.company)

        if self.filters.get(party_type_field):
            conditions.append("party=%s")
            values.append(self.filters.get(party_type_field))

        return " and ".join(conditions), values

    def get_gl_entries_for(self, party, party_type, against_voucher_type, against_voucher):
        """All GL entries booked against a given voucher, indexed lazily as
        party -> voucher_type -> voucher_no -> [entries]."""
        if not hasattr(self, "gl_entries_map"):
            self.gl_entries_map = {}
            for gle in self.get_gl_entries(party_type):
                if gle.against_voucher_type and gle.against_voucher:
                    self.gl_entries_map.setdefault(gle.party, {})\
                        .setdefault(gle.against_voucher_type, {})\
                        .setdefault(gle.against_voucher, [])\
                        .append(gle)

        return self.gl_entries_map.get(party, {})\
            .get(against_voucher_type, {})\
            .get(against_voucher, [])

    def get_chart_data(self, columns, data):
        """Pie chart of the four ageing buckets, sliced out of the row data."""
        ageing_columns = columns[self.ageing_col_idx_start : self.ageing_col_idx_start+4]

        rows = []
        for d in data:
            rows.append(d[self.ageing_col_idx_start : self.ageing_col_idx_start+4])

        if rows:
            rows.insert(0, [[d.get("label")] for d in ageing_columns])

        return {
            "data": {
                'rows': rows
            },
            "chart_type": 'pie'
        }
def execute(filters=None):
    """Script-report entry point: run the report for Customer receivables."""
    report = ReceivablePayableReport(filters)
    return report.run({
        "party_type": "Customer",
        "naming_by": ["Selling Settings", "cust_master_name"],
    })
def get_ageing_data(first_range, second_range, third_range, age_as_on, entry_date, outstanding_amount):
    """Return [age_in_days, bucket0, bucket1, bucket2, bucket3] where the
    outstanding amount lands in exactly one ageing bucket.

    Buckets cover [0-range1, range1-range2, range2-range3, range3-above].
    """
    buckets = [0.0, 0.0, 0.0, 0.0]
    if not age_as_on or not entry_date:
        # Without both dates there is nothing to age.
        return [0] + buckets

    age = (getdate(age_as_on) - getdate(entry_date)).days or 0
    for bucket, upper_limit in enumerate((first_range, second_range, third_range)):
        if age <= upper_limit:
            break
    else:
        # Older than every cut-off: last bucket.
        bucket = 3
    buckets[bucket] = outstanding_amount
    return [age] + buckets
| agpl-3.0 |
beiko-lab/gengis | bin/Lib/email/test/test_email_torture.py | 15 | 3805 | # Copyright (C) 2002-2004 Python Software Foundation
#
# A torture test of the email package. This should not be run as part of the
# standard Python test suite since it requires several meg of email messages
# collected in the wild. These source messages are not checked into the
# Python distro, but are available as part of the standalone email package at
# http://sf.net/projects/mimelib
import sys
import os
import unittest
from cStringIO import StringIO
from types import ListType
from email.test.test_email import TestEmailBase
from test.test_support import TestSkipped, run_unittest
import email
from email import __file__ as testfile
from email.iterators import _structure
def openfile(filename):
    """Open *filename* from the 'moredata' dir that sits next to the email
    package (these fixtures are not shipped with the Python distro)."""
    from os.path import abspath, dirname, join
    moredata_dir = join(dirname(testfile), os.pardir, 'moredata')
    return open(abspath(join(moredata_dir, filename)), 'r')
# Prevent this test from running in the Python distro
# The torture fixtures live outside the source tree; when they are absent
# (i.e. in a normal CPython checkout) skip the whole module at import time.
try:
    openfile('crispin-torture.txt')
except IOError:
    raise TestSkipped
class TortureBase(TestEmailBase):
    """Shared helper for torture tests: parse a fixture file into a Message."""

    def _msgobj(self, filename):
        fp = openfile(filename)
        with fp:
            # File is closed on exit even if parsing raises.
            return email.message_from_file(fp)
class TestCrispinTorture(TortureBase):
    # Mark Crispin's torture test from the SquirrelMail project
    def test_mondo_message(self):
        """Parse the deeply-nested Crispin message and verify its MIME tree."""
        eq = self.assertEqual
        neq = self.ndiffAssertEqual
        msg = self._msgobj('crispin-torture.txt')
        payload = msg.get_payload()
        # Top level is a multipart with exactly 12 sub-parts.
        eq(type(payload), ListType)
        eq(len(payload), 12)
        eq(msg.preamble, None)
        eq(msg.epilogue, '\n')
        # Probably the best way to verify the message is parsed correctly is to
        # dump its structure and compare it against the known structure.
        # NOTE(review): the expected text below must match _structure()'s
        # output byte-for-byte, including any leading whitespace per level.
        fp = StringIO()
        _structure(msg, fp=fp)
        neq(fp.getvalue(), """\
multipart/mixed
text/plain
message/rfc822
multipart/alternative
text/plain
multipart/mixed
text/richtext
application/andrew-inset
message/rfc822
audio/basic
audio/basic
image/pbm
message/rfc822
multipart/mixed
multipart/mixed
text/plain
audio/x-sun
multipart/mixed
image/gif
image/gif
application/x-be2
application/atomicmail
audio/x-sun
message/rfc822
multipart/mixed
text/plain
image/pgm
text/plain
message/rfc822
multipart/mixed
text/plain
image/pbm
message/rfc822
application/postscript
image/gif
message/rfc822
multipart/mixed
audio/basic
audio/basic
message/rfc822
multipart/mixed
application/postscript
text/plain
message/rfc822
multipart/mixed
text/plain
multipart/parallel
image/gif
audio/basic
application/atomicmail
message/rfc822
audio/x-sun
""")
def _testclasses():
    """Return every Test* class defined in this module."""
    this_module = sys.modules[__name__]
    found = []
    for attr in dir(this_module):
        if attr.startswith('Test'):
            found.append(getattr(this_module, attr))
    return found
def suite():
    """Build a TestSuite containing every Test* class in this module."""
    result = unittest.TestSuite()
    for cls in _testclasses():
        result.addTest(unittest.makeSuite(cls))
    return result
def test_main():
    """Run each torture-test class through the regrtest driver."""
    for cls in _testclasses():
        run_unittest(cls)
# Allow running this file directly; 'suite' aggregates all Test* classes.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| gpl-3.0 |
ofer43211/unisubs | apps/videos/migrations/0004_auto__add_field_video_widget_views_count.py | 5 | 9784 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``Video.widget_views_count``.

    The ``models`` dict below is the machine-generated ORM freeze for this
    migration's point in history — do not edit it by hand.
    """

    def forwards(self, orm):
        # Adding field 'Video.widget_views_count'
        db.add_column('videos_video', 'widget_views_count', self.gf('django.db.models.fields.IntegerField')(default=0))

        # NOTE(review): existing rows are backfilled to 1 (not the column
        # default of 0); skipped during --dry-run since no ORM is available.
        if not db.dry_run:
            for obj in orm.Video.objects.all():
                obj.widget_views_count = 1
                obj.save()

    def backwards(self, orm):
        # Deleting field 'Video.widget_views_count'
        db.delete_column('videos_video', 'widget_views_count')

    # Frozen ORM state (auto-generated by South).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'videos.nulltranslations': {
            'Meta': {'object_name': 'NullTranslations'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
        },
        'videos.nullvideocaptions': {
            'Meta': {'object_name': 'NullVideoCaptions'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
        },
        'videos.translation': {
            'Meta': {'object_name': 'Translation'},
            'caption_id': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'null_translations': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullTranslations']", 'null': 'True'}),
            'translation_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationVersion']", 'null': 'True'})
        },
        'videos.translationlanguage': {
            'Meta': {'object_name': 'TranslationLanguage'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
            'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'videos.translationversion': {
            'Meta': {'object_name': 'TranslationVersion'},
            'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.TranslationLanguage']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'videos.video': {
            'Meta': {'object_name': 'Video'},
            'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'video_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'video_url': ('django.db.models.fields.URLField', [], {'max_length': '2048'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.User']"}),
            'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'youtube_videoid': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'videos.videocaption': {
            'Meta': {'object_name': 'VideoCaption'},
            'caption_id': ('django.db.models.fields.IntegerField', [], {}),
            'caption_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'end_time': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'null_captions': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.NullVideoCaptions']", 'null': 'True'}),
            'start_time': ('django.db.models.fields.FloatField', [], {}),
            'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.VideoCaptionVersion']", 'null': 'True'})
        },
        'videos.videocaptionversion': {
            'Meta': {'object_name': 'VideoCaptionVersion'},
            'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
        }
    }

    complete_apps = ['videos']
| agpl-3.0 |
marcosmodesto/django-testapp | django/django/contrib/localflavor/si/si_postalcodes.py | 89 | 13570 | # *-* coding: utf-8 *-*
SI_POSTALCODES = [
(1000, u'Ljubljana'),
(1215, u'Medvode'),
(1216, u'Smlednik'),
(1217, u'Vodice'),
(1218, u'Komenda'),
(1219, u'Laze v Tuhinju'),
(1221, u'Motnik'),
(1222, u'Trojane'),
(1223, u'Blagovica'),
(1225, u'Lukovica'),
(1230, u'Dom\u017eale'),
(1233, u'Dob'),
(1234, u'Menge\u0161'),
(1235, u'Radomlje'),
(1236, u'Trzin'),
(1241, u'Kamnik'),
(1242, u'Stahovica'),
(1251, u'Morav\u010de'),
(1252, u'Va\u010de'),
(1262, u'Dol pri Ljubljani'),
(1270, u'Litija'),
(1272, u'Pol\u0161nik'),
(1273, u'Dole pri Litiji'),
(1274, u'Gabrovka'),
(1275, u'\u0160martno pri Litiji'),
(1276, u'Primskovo'),
(1281, u'Kresnice'),
(1282, u'Sava'),
(1290, u'Grosuplje'),
(1291, u'\u0160kofljica'),
(1292, u'Ig'),
(1293, u'\u0160marje - Sap'),
(1294, u'Vi\u0161nja Gora'),
(1295, u'Ivan\u010dna Gorica'),
(1296, u'\u0160entvid pri Sti\u010dni'),
(1301, u'Krka'),
(1303, u'Zagradec'),
(1310, u'Ribnica'),
(1311, u'Turjak'),
(1312, u'Videm - Dobrepolje'),
(1313, u'Struge'),
(1314, u'Rob'),
(1315, u'Velike La\u0161\u010de'),
(1316, u'Ortnek'),
(1317, u'Sodra\u017eica'),
(1318, u'Lo\u0161ki Potok'),
(1319, u'Draga'),
(1330, u'Ko\u010devje'),
(1331, u'Dolenja vas'),
(1332, u'Stara Cerkev'),
(1336, u'Kostel'),
(1337, u'Osilnica'),
(1338, u'Ko\u010devska Reka'),
(1351, u'Brezovica pri Ljubljani'),
(1352, u'Preserje'),
(1353, u'Borovnica'),
(1354, u'Horjul'),
(1355, u'Polhov Gradec'),
(1356, u'Dobrova'),
(1357, u'Notranje Gorice'),
(1358, u'Log pri Brezovici'),
(1360, u'Vrhnika'),
(1370, u'Logatec'),
(1372, u'Hotedr\u0161ica'),
(1373, u'Rovte'),
(1380, u'Cerknica'),
(1381, u'Rakek'),
(1382, u'Begunje pri Cerknici'),
(1384, u'Grahovo'),
(1385, u'Nova vas'),
(1386, u'Stari trg pri Lo\u017eu'),
(1410, u'Zagorje ob Savi'),
(1411, u'Izlake'),
(1412, u'Kisovec'),
(1413, u'\u010cem\u0161enik'),
(1414, u'Podkum'),
(1420, u'Trbovlje'),
(1423, u'Dobovec'),
(1430, u'Hrastnik'),
(1431, u'Dol pri Hrastniku'),
(1432, u'Zidani Most'),
(1433, u'Rade\u010de'),
(1434, u'Loka pri Zidanem Mostu'),
(2000, u'Maribor'),
(2201, u'Zgornja Kungota'),
(2204, u'Miklav\u017e na Dravskem polju'),
(2205, u'Star\u0161e'),
(2206, u'Marjeta na Dravskem polju'),
(2208, u'Pohorje'),
(2211, u'Pesnica pri Mariboru'),
(2212, u'\u0160entilj v Slovenskih goricah'),
(2213, u'Zgornja Velka'),
(2214, u'Sladki vrh'),
(2215, u'Cer\u0161ak'),
(2221, u'Jarenina'),
(2222, u'Jakobski Dol'),
(2223, u'Jurovski Dol'),
(2229, u'Male\u010dnik'),
(2230, u'Lenart v Slovenskih goricah'),
(2231, u'Pernica'),
(2232, u'Voli\u010dina'),
(2233, u'Sveta Ana v Slovenskih goricah'),
(2234, u'Benedikt'),
(2235, u'Sveta Trojica v Slovenskih goricah'),
(2236, u'Cerkvenjak'),
(2241, u'Spodnji Duplek'),
(2242, u'Zgornja Korena'),
(2250, u'Ptuj'),
(2252, u'Dornava'),
(2253, u'Destrnik'),
(2254, u'Trnovska vas'),
(2255, u'Vitomarci'),
(2256, u'Jur\u0161inci'),
(2257, u'Polen\u0161ak'),
(2258, u'Sveti Toma\u017e'),
(2259, u'Ivanjkovci'),
(2270, u'Ormo\u017e'),
(2272, u'Gori\u0161nica'),
(2273, u'Podgorci'),
(2274, u'Velika Nedelja'),
(2275, u'Miklav\u017e pri Ormo\u017eu'),
(2276, u'Kog'),
(2277, u'Sredi\u0161\u010de ob Dravi'),
(2281, u'Markovci'),
(2282, u'Cirkulane'),
(2283, u'Zavr\u010d'),
(2284, u'Videm pri Ptuju'),
(2285, u'Zgornji Leskovec'),
(2286, u'Podlehnik'),
(2287, u'\u017detale'),
(2288, u'Hajdina'),
(2289, u'Stoperce'),
(2310, u'Slovenska Bistrica'),
(2311, u'Ho\u010de'),
(2312, u'Orehova vas'),
(2313, u'Fram'),
(2314, u'Zgornja Polskava'),
(2315, u'\u0160martno na Pohorju'),
(2316, u'Zgornja Lo\u017enica'),
(2317, u'Oplotnica'),
(2318, u'Laporje'),
(2319, u'Polj\u010dane'),
(2321, u'Makole'),
(2322, u'Maj\u0161perk'),
(2323, u'Ptujska Gora'),
(2324, u'Lovrenc na Dravskem polju'),
(2325, u'Kidri\u010devo'),
(2326, u'Cirkovce'),
(2327, u'Ra\u010de'),
(2331, u'Pragersko'),
(2341, u'Limbu\u0161'),
(2342, u'Ru\u0161e'),
(2343, u'Fala'),
(2344, u'Lovrenc na Pohorju'),
(2345, u'Bistrica ob Dravi'),
(2351, u'Kamnica'),
(2352, u'Selnica ob Dravi'),
(2353, u'Sv. Duh na Ostrem Vrhu'),
(2354, u'Bresternica'),
(2360, u'Radlje ob Dravi'),
(2361, u'O\u017ebalt'),
(2362, u'Kapla'),
(2363, u'Podvelka'),
(2364, u'Ribnica na Pohorju'),
(2365, u'Vuhred'),
(2366, u'Muta'),
(2367, u'Vuzenica'),
(2370, u'Dravograd'),
(2371, u'Trbonje'),
(2372, u'Libeli\u010de'),
(2373, u'\u0160entjan\u017e pri Dravogradu'),
(2380, u'Slovenj Gradec'),
(2381, u'Podgorje pri Slovenj Gradcu'),
(2382, u'Mislinja'),
(2383, u'\u0160martno pri Slovenj Gradcu'),
(2390, u'Ravne na Koro\u0161kem'),
(2391, u'Prevalje'),
(2392, u'Me\u017eica'),
(2393, u'\u010crna na Koro\u0161kem'),
(2394, u'Kotlje'),
(3000, u'Celje'),
(3201, u'\u0160martno v Ro\u017eni dolini'),
(3202, u'Ljube\u010dna'),
(3203, u'Nova Cerkev'),
(3204, u'Dobrna'),
(3205, u'Vitanje'),
(3206, u'Stranice'),
(3210, u'Slovenske Konjice'),
(3211, u'\u0160kofja vas'),
(3212, u'Vojnik'),
(3213, u'Frankolovo'),
(3214, u'Zre\u010de'),
(3215, u'Lo\u010de'),
(3220, u'\u0160tore'),
(3221, u'Teharje'),
(3222, u'Dramlje'),
(3223, u'Loka pri \u017dusmu'),
(3224, u'Dobje pri Planini'),
(3225, u'Planina pri Sevnici'),
(3230, u'\u0160entjur'),
(3231, u'Grobelno'),
(3232, u'Ponikva'),
(3233, u'Kalobje'),
(3240, u'\u0160marje pri Jel\u0161ah'),
(3241, u'Podplat'),
(3250, u'Roga\u0161ka Slatina'),
(3252, u'Rogatec'),
(3253, u'Pristava pri Mestinju'),
(3254, u'Pod\u010detrtek'),
(3255, u'Bu\u010de'),
(3256, u'Bistrica ob Sotli'),
(3257, u'Podsreda'),
(3260, u'Kozje'),
(3261, u'Lesi\u010dno'),
(3262, u'Prevorje'),
(3263, u'Gorica pri Slivnici'),
(3264, u'Sveti \u0160tefan'),
(3270, u'La\u0161ko'),
(3271, u'\u0160entrupert'),
(3272, u'Rimske Toplice'),
(3273, u'Jurklo\u0161ter'),
(3301, u'Petrov\u010de'),
(3302, u'Gri\u017ee'),
(3303, u'Gomilsko'),
(3304, u'Tabor'),
(3305, u'Vransko'),
(3310, u'\u017dalec'),
(3311, u'\u0160empeter v Savinjski dolini'),
(3312, u'Prebold'),
(3313, u'Polzela'),
(3314, u'Braslov\u010de'),
(3320, u'Velenje - dostava'),
(3322, u'Velenje - po\u0161tni predali'),
(3325, u'\u0160o\u0161tanj'),
(3326, u'Topol\u0161ica'),
(3327, u'\u0160martno ob Paki'),
(3330, u'Mozirje'),
(3331, u'Nazarje'),
(3332, u'Re\u010dica ob Savinji'),
(3333, u'Ljubno ob Savinji'),
(3334, u'Lu\u010de'),
(3335, u'Sol\u010dava'),
(3341, u'\u0160martno ob Dreti'),
(3342, u'Gornji Grad'),
(4000, u'Kranj'),
(4201, u'Zgornja Besnica'),
(4202, u'Naklo'),
(4203, u'Duplje'),
(4204, u'Golnik'),
(4205, u'Preddvor'),
(4206, u'Zgornje Jezersko'),
(4207, u'Cerklje na Gorenjskem'),
(4208, u'\u0160en\u010dur'),
(4209, u'\u017dabnica'),
(4210, u'Brnik - aerodrom'),
(4211, u'Mav\u010di\u010de'),
(4212, u'Visoko'),
(4220, u'\u0160kofja Loka'),
(4223, u'Poljane nad \u0160kofjo Loko'),
(4224, u'Gorenja vas'),
(4225, u'Sovodenj'),
(4226, u'\u017diri'),
(4227, u'Selca'),
(4228, u'\u017delezniki'),
(4229, u'Sorica'),
(4240, u'Radovljica'),
(4243, u'Brezje'),
(4244, u'Podnart'),
(4245, u'Kropa'),
(4246, u'Kamna Gorica'),
(4247, u'Zgornje Gorje'),
(4248, u'Lesce'),
(4260, u'Bled'),
(4263, u'Bohinjska Bela'),
(4264, u'Bohinjska Bistrica'),
(4265, u'Bohinjsko jezero'),
(4267, u'Srednja vas v Bohinju'),
(4270, u'Jesenice'),
(4273, u'Blejska Dobrava'),
(4274, u'\u017dirovnica'),
(4275, u'Begunje na Gorenjskem'),
(4276, u'Hru\u0161ica'),
(4280, u'Kranjska Gora'),
(4281, u'Mojstrana'),
(4282, u'Gozd Martuljek'),
(4283, u'Rate\u010de - Planica'),
(4290, u'Tr\u017ei\u010d'),
(4294, u'Kri\u017ee'),
(5000, u'Nova Gorica'),
(5210, u'Deskle'),
(5211, u'Kojsko'),
(5212, u'Dobrovo v Brdih'),
(5213, u'Kanal'),
(5214, u'Kal nad Kanalom'),
(5215, u'Ro\u010dinj'),
(5216, u'Most na So\u010di'),
(5220, u'Tolmin'),
(5222, u'Kobarid'),
(5223, u'Breginj'),
(5224, u'Srpenica'),
(5230, u'Bovec'),
(5231, u'Log pod Mangartom'),
(5232, u'So\u010da'),
(5242, u'Grahovo ob Ba\u010di'),
(5243, u'Podbrdo'),
(5250, u'Solkan'),
(5251, u'Grgar'),
(5252, u'Trnovo pri Gorici'),
(5253, u'\u010cepovan'),
(5261, u'\u0160empas'),
(5262, u'\u010crni\u010de'),
(5263, u'Dobravlje'),
(5270, u'Ajdov\u0161\u010dina'),
(5271, u'Vipava'),
(5272, u'Podnanos'),
(5273, u'Col'),
(5274, u'\u010crni Vrh nad Idrijo'),
(5275, u'Godovi\u010d'),
(5280, u'Idrija'),
(5281, u'Spodnja Idrija'),
(5282, u'Cerkno'),
(5283, u'Slap ob Idrijci'),
(5290, u'\u0160empeter pri Gorici'),
(5291, u'Miren'),
(5292, u'Ren\u010de'),
(5293, u'Vol\u010dja Draga'),
(5294, u'Dornberk'),
(5295, u'Branik'),
(5296, u'Kostanjevica na Krasu'),
(5297, u'Prva\u010dina'),
(6000, u'Koper'),
(6210, u'Se\u017eana'),
(6215, u'Diva\u010da'),
(6216, u'Podgorje'),
(6217, u'Vremski Britof'),
(6219, u'Lokev'),
(6221, u'Dutovlje'),
(6222, u'\u0160tanjel'),
(6223, u'Komen'),
(6224, u'Seno\u017ee\u010de'),
(6225, u'Hru\u0161evje'),
(6230, u'Postojna'),
(6232, u'Planina'),
(6240, u'Kozina'),
(6242, u'Materija'),
(6243, u'Obrov'),
(6244, u'Podgrad'),
(6250, u'Ilirska Bistrica'),
(6251, u'Ilirska Bistrica - Trnovo'),
(6253, u'Kne\u017eak'),
(6254, u'Jel\u0161ane'),
(6255, u'Prem'),
(6256, u'Ko\u0161ana'),
(6257, u'Pivka'),
(6258, u'Prestranek'),
(6271, u'Dekani'),
(6272, u'Gra\u010di\u0161\u010de'),
(6273, u'Marezige'),
(6274, u'\u0160marje'),
(6275, u'\u010crni Kal'),
(6276, u'Pobegi'),
(6280, u'Ankaran - Ancarano'),
(6281, u'\u0160kofije'),
(6310, u'Izola - Isola'),
(6320, u'Portoro\u017e - Portorose'),
(6330, u'Piran - Pirano'),
(6333, u'Se\u010dovlje - Sicciole'),
(8000, u'Novo mesto'),
(8210, u'Trebnje'),
(8211, u'Dobrni\u010d'),
(8212, u'Velika Loka'),
(8213, u'Veliki Gaber'),
(8216, u'Mirna Pe\u010d'),
(8220, u'\u0160marje\u0161ke Toplice'),
(8222, u'Oto\u010dec'),
(8230, u'Mokronog'),
(8231, u'Trebelno'),
(8232, u'\u0160entrupert'),
(8233, u'Mirna'),
(8250, u'Bre\u017eice'),
(8251, u'\u010cate\u017e ob Savi'),
(8253, u'Arti\u010de'),
(8254, u'Globoko'),
(8255, u'Pi\u0161ece'),
(8256, u'Sromlje'),
(8257, u'Dobova'),
(8258, u'Kapele'),
(8259, u'Bizeljsko'),
(8261, u'Jesenice na Dolenjskem'),
(8262, u'Kr\u0161ka vas'),
(8263, u'Cerklje ob Krki'),
(8270, u'Kr\u0161ko'),
(8272, u'Zdole'),
(8273, u'Leskovec pri Kr\u0161kem'),
(8274, u'Raka'),
(8275, u'\u0160kocjan'),
(8276, u'Bu\u010dka'),
(8280, u'Brestanica'),
(8281, u'Senovo'),
(8282, u'Koprivnica'),
(8283, u'Blanca'),
(8290, u'Sevnica'),
(8292, u'Zabukovje'),
(8293, u'Studenec'),
(8294, u'Bo\u0161tanj'),
(8295, u'Tr\u017ei\u0161\u010de'),
(8296, u'Krmelj'),
(8297, u'\u0160entjan\u017e'),
(8310, u'\u0160entjernej'),
(8311, u'Kostanjevica na Krki'),
(8312, u'Podbo\u010dje'),
(8321, u'Brusnice'),
(8322, u'Stopi\u010de'),
(8323, u'Ur\u0161na sela'),
(8330, u'Metlika'),
(8331, u'Suhor'),
(8332, u'Gradac'),
(8333, u'Semi\u010d'),
(8340, u'\u010crnomelj'),
(8341, u'Adle\u0161i\u010di'),
(8342, u'Stari trg ob Kolpi'),
(8343, u'Dragatu\u0161'),
(8344, u'Vinica pri \u010crnomlju'),
(8350, u'Dolenjske Toplice'),
(8351, u'Stra\u017ea'),
(8360, u'\u017du\u017eemberk'),
(8361, u'Dvor'),
(8362, u'Hinje'),
(9000, u'Murska Sobota'),
(9201, u'Puconci'),
(9202, u'Ma\u010dkovci'),
(9203, u'Petrovci'),
(9204, u'\u0160alovci'),
(9205, u'Hodo\u0161 - Hodos'),
(9206, u'Kri\u017eevci'),
(9207, u'Prosenjakovci - Partosfalva'),
(9208, u'Fokovci'),
(9220, u'Lendava - Lendva'),
(9221, u'Martjanci'),
(9222, u'Bogojina'),
(9223, u'Dobrovnik - Dobronak'),
(9224, u'Turni\u0161\u010de'),
(9225, u'Velika Polana'),
(9226, u'Moravske Toplice'),
(9227, u'Kobilje'),
(9231, u'Beltinci'),
(9232, u'\u010cren\u0161ovci'),
(9233, u'Odranci'),
(9240, u'Ljutomer'),
(9241, u'Ver\u017eej'),
(9242, u'Kri\u017eevci pri Ljutomeru'),
(9243, u'Mala Nedelja'),
(9244, u'Sveti Jurij ob \u0160\u010davnici'),
(9245, u'Spodnji Ivanjci'),
(9250, u'Gornja Radgona'),
(9251, u'Ti\u0161ina'),
(9252, u'Radenci'),
(9253, u'Apa\u010de'),
(9261, u'Cankova'),
(9262, u'Roga\u0161ovci'),
(9263, u'Kuzma'),
(9264, u'Grad'),
(9265, u'Bodonci'),
]
# Choices list for form fields: (code, name) tuples sorted alphabetically by
# the place name (the second tuple element), not by postal code.
SI_POSTALCODES_CHOICES = sorted(SI_POSTALCODES, key=lambda k: k[1])
| bsd-3-clause |
trondhindenes/ansible-modules-core | cloud/google/gce_mig.py | 3 | 30256 | #!/usr/bin/python
# Copyright 2016 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_mig
version_added: "2.2"
short_description: Create, Update or Destroy a Managed Instance Group (MIG).
description:
- Create, Update or Destroy a Managed Instance Group (MIG). See
U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
requirements:
- "python >= 2.6"
- "apache-libcloud >= 1.2.0"
notes:
- Resizing and Recreating VM are also supported.
- An existing instance template is required in order to create a
Managed Instance Group.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
name:
description:
- Name of the Managed Instance Group.
required: true
template:
description:
- Instance Template to be used in creating the VMs. See
U(https://cloud.google.com/compute/docs/instance-templates) to learn more
about Instance Templates. Required for creating MIGs.
required: false
size:
description:
- Size of Managed Instance Group. If MIG already exists, it will be
resized to the number provided here. Required for creating MIGs.
required: false
service_account_email:
description:
- service account email
required: false
default: null
credentials_file:
description:
- Path to the JSON file associated with the service account email
default: null
required: false
project_id:
description:
- GCE project ID
required: false
default: null
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["absent", "present"]
zone:
description:
- The GCE zone to use for this Managed Instance Group.
required: true
autoscaling:
description:
- A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
and policy.max_instances (int) are required fields if autoscaling is used. See
U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
on Autoscaling.
required: false
default: null
named_ports:
version_added: "2.3"
description:
- Define named ports that backend services can forward data to. Format is a a list of
name:port dictionaries.
required: false
default: null
'''
EXAMPLES = '''
# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
# Notes:
# - Two valid Instance Templates must exist in your GCE project in order to run
# this playbook. Change the fields to match the templates used in your
# project.
# - The use of the 'pause' module is not required, it is just for convenience.
- name: Managed Instance Group Example
hosts: localhost
gather_facts: False
tasks:
- name: Create MIG
gce_mig:
name: ansible-mig-example
zone: us-central1-c
state: present
size: 1
template: my-instance-template-1
named_ports:
- name: http
port: 80
- name: foobar
port: 82
- pause: seconds=30
- name: Recreate MIG Instances with Instance Template change.
gce_mig:
name: ansible-mig-example
zone: us-central1-c
state: present
template: my-instance-template-2-small
recreate_instances: yes
- pause: seconds=30
- name: Resize MIG
gce_mig:
name: ansible-mig-example
zone: us-central1-c
state: present
size: 3
- name: Update MIG with Autoscaler
gce_mig:
name: ansible-mig-example
zone: us-central1-c
state: present
size: 3
template: my-instance-template-2-small
recreate_instances: yes
autoscaling:
enabled: yes
name: my-autoscaler
policy:
min_instances: 2
max_instances: 5
cool_down_period: 37
cpu_utilization:
target: .39
load_balancing_utilization:
target: 0.4
- pause: seconds=30
- name: Delete MIG
gce_mig:
name: ansible-mig-example
zone: us-central1-c
state: absent
autoscaling:
enabled: no
name: my-autoscaler
'''
RETURN = '''
zone:
description: Zone in which to launch MIG.
returned: always
type: string
sample: "us-central1-b"
template:
description: Instance Template to use for VMs. Must exist prior to using with MIG.
returned: changed
type: string
sample: "my-instance-template"
name:
description: Name of the Managed Instance Group.
returned: changed
type: string
sample: "my-managed-instance-group"
named_ports:
description: list of named ports acted upon
returned: when named_ports are initially set or updated
type: list
sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
size:
description: Number of VMs in Managed Instance Group.
returned: changed
type: integer
sample: 4
created_instances:
description: Names of instances created.
returned: When instances are created.
type: list
sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
deleted_instances:
description: Names of instances deleted.
returned: When instances are deleted.
type: list
sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
resize_created_instances:
description: Names of instances created during resizing.
returned: When a resize results in the creation of instances.
type: list
sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
resize_deleted_instances:
description: Names of instances deleted during resizing.
returned: When a resize results in the deletion of instances.
type: list
sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
recreated_instances:
description: Names of instances recreated.
returned: When instances are recreated.
type: list
sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
created_autoscaler:
description: True if Autoscaler was attempted and created. False otherwise.
returned: When the creation of an Autoscaler was attempted.
type: bool
sample: true
updated_autoscaler:
description: True if an Autoscaler update was attempted and succeeded.
False returned if update failed.
returned: When the update of an Autoscaler was attempted.
type: bool
sample: true
deleted_autoscaler:
description: True if an Autoscaler delete attempted and succeeded.
False returned if delete failed.
returned: When the delete of an Autoscaler was attempted.
type: bool
sample: true
set_named_ports:
description: True if the named_ports have been set
returned: named_ports have been set
type: bool
sample: true
updated_named_ports:
description: True if the named_ports have been updated
returned: named_ports have been updated
type: bool
sample: true
'''
import socket
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
def _check_params(params, field_list):
"""
Helper to validate params.
Use this in function definitions if they require specific fields
to be present.
:param params: structure that contains the fields
:type params: ``dict``
:param field_list: list of dict representing the fields
[{'name': str, 'required': True/False', 'type': cls}]
:type field_list: ``list`` of ``dict``
:return True, exits otherwise
:rtype: ``bool``
"""
for d in field_list:
if not d['name'] in params:
if d['required'] is True:
return (False, "%s is required and must be of type: %s" %
(d['name'], str(d['type'])))
else:
if not isinstance(params[d['name']], d['type']):
return (False,
"%s must be of type: %s" % (d['name'], str(d['type'])))
return (True, '')
def _validate_autoscaling_params(params):
    """
    Validate that the minimum configuration is present for autoscaling.

    :param params: Ansible dictionary containing autoscaling configuration
                   It is expected that autoscaling config will be found at the
                   key 'autoscaling'.
    :type params: ``dict``

    :return: Tuple containing a boolean and a string. True if autoscaler
             is valid, False otherwise, plus str for message.
    :rtype: ``(``bool``, ``str``)``
    """
    config = params['autoscaling']
    # Autoscaling is optional; absence is valid.
    if not config:
        return (True, '')
    if not isinstance(config, dict):
        return (False,
                'autoscaling: configuration expected to be a dictionary.')

    # First-level required fields.
    top_level_fields = [
        {'name': 'name', 'required': True, 'type': str},
        {'name': 'enabled', 'required': True, 'type': bool},
        {'name': 'policy', 'required': True, 'type': dict}
    ]  # yapf: disable
    (ok, msg) = _check_params(config, top_level_fields)
    if not ok:
        return (False, msg)

    # Policy configuration: max_instances is mandatory, the rest optional.
    policy_fields = [
        {'name': 'max_instances', 'required': True, 'type': int},
        {'name': 'min_instances', 'required': False, 'type': int},
        {'name': 'cool_down_period', 'required': False, 'type': int}
    ]  # yapf: disable
    (ok, msg) = _check_params(config['policy'], policy_fields)
    if not ok:
        return (False, msg)

    # TODO(supertom): check utilization fields
    return (True, '')
def _validate_named_port_params(params):
    """
    Validate the named ports parameters

    :param params: Ansible dictionary containing named_ports configuration
                   It is expected that named_ports config will be found at the
                   key 'named_ports'. That key should contain a list of
                   {name : port} dictionaries.
    :type params: ``dict``

    :return: Tuple containing a boolean and a string. True if params
             are valid, False otherwise, plus str for message.
    :rtype: ``(``bool``, ``str``)``
    """
    named_ports = params['named_ports']
    # named_ports is optional; absence is valid.
    if not named_ports:
        return (True, '')
    if not isinstance(named_ports, list):
        return (False, 'named_ports: expected list of name:port dictionaries.')

    required_fields = [
        {'name': 'name', 'required': True, 'type': str},
        {'name': 'port', 'required': True, 'type': int}
    ]  # yapf: disable
    for entry in named_ports:
        (ok, msg) = _check_params(entry, required_fields)
        if not ok:
            return (False, msg)
    return (True, '')
def _get_instance_list(mig, field='name', filter_list=['NONE']):
"""
Helper to grab field from instances response.
:param mig: Managed Instance Group Object from libcloud.
:type mig: :class: `GCEInstanceGroupManager`
:param field: Field name in list_managed_instances response. Defaults
to 'name'.
:type field: ``str``
:param filter_list: list of 'currentAction' strings to filter on. Only
items that match a currentAction in this list will
be returned. Default is "['NONE']".
:type filter_list: ``list`` of ``str``
:return: List of strings from list_managed_instances response.
:rtype: ``list``
"""
return [x[field] for x in mig.list_managed_instances()
if x['currentAction'] in filter_list]
def _gen_gce_as_policy(as_params):
"""
Take Autoscaler params and generate GCE-compatible policy.
:param as_params: Dictionary in Ansible-playbook format
containing policy arguments.
:type as_params: ``dict``
:return: GCE-compatible policy dictionary
:rtype: ``dict``
"""
asp_data = {}
asp_data['maxNumReplicas'] = as_params['max_instances']
if 'min_instances' in as_params:
asp_data['minNumReplicas'] = as_params['min_instances']
if 'cool_down_period' in as_params:
asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
if 'cpu_utilization' in as_params and 'target' in as_params[
'cpu_utilization']:
asp_data['cpuUtilization'] = {'utilizationTarget':
as_params['cpu_utilization']['target']}
if 'load_balancing_utilization' in as_params and 'target' in as_params[
'load_balancing_utilization']:
asp_data['loadBalancingUtilization'] = {
'utilizationTarget':
as_params['load_balancing_utilization']['target']
}
return asp_data
def create_autoscaler(gce, mig, params):
    """
    Create a new Autoscaler for a MIG.

    :param gce: An initialized GCE driver object.
    :type gce:  :class: `GCENodeDriver`

    :param mig: An initialized GCEInstanceGroupManager.
    :type mig:  :class: `GCEInstanceGroupManager`

    :param params: Dictionary of autoscaling parameters.
    :type params: ``dict``

    :return: True if the Autoscaler was created, False otherwise.
    :rtype: ``bool``
    """
    policy = _gen_gce_as_policy(params['policy'])
    new_autoscaler = gce.ex_create_autoscaler(name=params['name'],
                                              zone=mig.zone,
                                              instance_group=mig,
                                              policy=policy)
    return bool(new_autoscaler)
def update_autoscaler(gce, autoscaler, params):
    """
    Update an Autoscaler.

    Takes an existing Autoscaler object, and updates it with
    the supplied params before calling libcloud's update method.

    :param gce: An initialized GCE driver object.
    :type gce:  :class: `GCENodeDriver`

    :param autoscaler: An initialized GCEAutoscaler.
    :type autoscaler:  :class: `GCEAutoscaler`

    :param params: Dictionary of autoscaling parameters.
    :type params: ``dict``

    :return: True if changes, False otherwise.
    :rtype: ``bool``
    """
    desired_policy = _gen_gce_as_policy(params['policy'])
    # No-op when the existing policy already matches the request.
    if autoscaler.policy == desired_policy:
        return False
    autoscaler.policy = desired_policy
    return bool(gce.ex_update_autoscaler(autoscaler))
def delete_autoscaler(autoscaler):
    """
    Delete an Autoscaler.  Does not affect the MIG it was attached to.

    :param autoscaler: Autoscaler object from libcloud.
    :type autoscaler: :class: `GCEAutoscaler`

    :return: True if the Autoscaler was destroyed, False otherwise.
    :rtype: ``bool``
    """
    return bool(autoscaler.destroy())
def get_autoscaler(gce, name, zone):
    """
    Get an Autoscaler from GCE.

    If the Autoscaler is not found, None is returned.

    :param gce: An initialized GCE driver object.
    :type gce:  :class: `GCENodeDriver`

    :param name: Name of the Autoscaler.
    :type name: ``str``

    :param zone: Zone that the Autoscaler is located in.
    :type zone: ``str``

    :return: A GCEAutoscaler object or None.
    :rtype: :class: `GCEAutoscaler` or None
    """
    try:
        autoscaler = gce.ex_get_autoscaler(name, zone)
    except ResourceNotFoundError:
        # Not an error for callers: absence is a normal outcome.
        return None
    return autoscaler
def create_mig(gce, params):
    """
    Create a new Managed Instance Group.

    :param gce: An initialized GCE driver object.
    :type gce:  :class: `GCENodeDriver`

    :param params: Dictionary of parameters needed by the module.
    :type params: ``dict``

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    new_mig = gce.ex_create_instancegroupmanager(
        name=params['name'], size=params['size'],
        template=params['template'], zone=params['zone'])
    if not new_mig:
        return (False, [])
    # Report the instances GCE is spinning up for the new group.
    return (True, _get_instance_list(new_mig, filter_list=['CREATING']))
def delete_mig(mig):
    """
    Delete a Managed Instance Group.  All VMs in that MIG are also deleted.

    :param mig: Managed Instance Group Object from Libcloud.
    :type mig:  :class: `GCEInstanceGroupManager`

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    # Capture the instance names before destroying the group; any current
    # action counts since every VM in the MIG goes away with it.
    all_actions = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
                   'ABANDONING', 'RESTARTING', 'REFRESHING']
    doomed_instances = _get_instance_list(mig, filter_list=all_actions)
    if not mig.destroy():
        return (False, [])
    return (True, doomed_instances)
def recreate_instances_in_mig(mig):
    """
    Recreate the instances for a Managed Instance Group.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig:  :class: `GCEInstanceGroupManager`

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    if not mig.recreate_instances():
        return (False, [])
    # Report the instances currently being rebuilt.
    return (True, _get_instance_list(mig, filter_list=['RECREATING']))
def resize_mig(mig, size):
    """
    Resize a Managed Instance Group.

    Based on the size provided, GCE will automatically create and delete
    VMs as needed.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig:  :class: `GCEInstanceGroupManager`

    :param size: desired number of instances in the group.
    :type size: ``int``

    :return: Tuple with changed stats and a list of affected instances.
    :rtype: tuple in the format of (bool, list)
    """
    if not mig.resize(size):
        return (False, [])
    # A resize may both create and delete instances; report either kind.
    return (True, _get_instance_list(mig, filter_list=['CREATING',
                                                       'DELETING']))
def get_mig(gce, name, zone):
    """
    Get a Managed Instance Group from GCE.

    If the MIG is not found, None is returned.

    :param gce: An initialized GCE driver object.
    :type gce:  :class: `GCENodeDriver`

    :param name: Name of the Managed Instance Group.
    :type name: ``str``

    :param zone: Zone that the Managed Instance Group is located in.
    :type zone: ``str``

    :return: A GCEInstanceGroupManager object or None.
    :rtype: :class: `GCEInstanceGroupManager` or None
    """
    try:
        mig = gce.ex_get_instancegroupmanager(name=name, zone=zone)
    except ResourceNotFoundError:
        # Not an error for callers: absence is a normal outcome.
        return None
    return mig
def update_named_ports(mig, named_ports):
    """
    Set the named ports on a Managed Instance Group.

    Sort the existing and desired named ports and compare; only when they
    differ is an update issued.  Passing an empty/None list therefore
    implicitly removes existing named ports.

    :param mig: Managed Instance Group Object from libcloud.
    :type mig:  :class: `GCEInstanceGroupManager`

    :param named_ports: list of dictionaries in the format of
                        {'name': ..., 'port': ...}
    :type named_ports: ``list`` of ``dict``

    :return: True if an update was made, False otherwise.
    :rtype: ``bool``
    """
    current_ports = []
    if hasattr(mig.instance_group, 'named_ports'):
        current_ports = sorted(mig.instance_group.named_ports,
                               key=lambda p: p['name'])
    desired_ports = []
    if named_ports is not None:
        desired_ports = sorted(named_ports, key=lambda p: p['name'])
    if current_ports == desired_ports:
        return False
    return bool(mig.instance_group.set_named_ports(named_ports))
def main():
    """Ansible entry point: create, update, resize or delete a GCE MIG.

    Flow: parse and validate module parameters, connect to GCE, then branch
    on whether the MIG already exists and on the requested 'state'.  All
    results are accumulated in ``json_output`` and returned through
    ``module.exit_json``; fatal conditions exit via ``module.fail_json``.
    """
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True),
        template=dict(),
        recreate_instances=dict(type='bool', default=False),
        # Do not set a default size here.  For Create and some update
        # operations, it is required and should be explicitly set.
        # Below, we set it to the existing value if it has not been set.
        size=dict(type='int'),
        state=dict(choices=['absent', 'present'], default='present'),
        zone=dict(required=True),
        autoscaling=dict(type='dict', default=None),
        named_ports=dict(type='list', default=None),
        service_account_email=dict(),
        service_account_permissions=dict(type='list'),
        pem_file=dict(),
        credentials_file=dict(),
        project_id=dict(), ), )
    # Environment sanity checks: python version and libcloud capability.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
    gce = gce_connect(module)
    if not hasattr(gce, 'ex_create_instancegroupmanager'):
        module.fail_json(
            msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
            changed=False)
    # Collect the parameters this module acts on into one dict.
    params = {}
    params['state'] = module.params.get('state')
    params['zone'] = module.params.get('zone')
    params['name'] = module.params.get('name')
    params['size'] = module.params.get('size')
    params['template'] = module.params.get('template')
    params['recreate_instances'] = module.params.get('recreate_instances')
    params['autoscaling'] = module.params.get('autoscaling', None)
    params['named_ports'] = module.params.get('named_ports', None)
    # Validate optional autoscaling / named_ports configuration up front.
    (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
    if not valid_autoscaling:
        module.fail_json(msg=as_msg, changed=False)
    if params['named_ports'] is not None and not hasattr(
            gce, 'ex_instancegroup_set_named_ports'):
        module.fail_json(
            msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
            changed=False)
    (valid_named_ports, np_msg) = _validate_named_port_params(params)
    if not valid_named_ports:
        module.fail_json(msg=np_msg, changed=False)
    changed = False
    json_output = {'state': params['state'], 'zone': params['zone']}
    mig = get_mig(gce, params['name'], params['zone'])
    if not mig:
        if params['state'] == 'absent':
            # Doesn't exist in GCE, and state==absent.
            changed = False
            module.fail_json(
                msg="Cannot delete unknown managed instance group: %s" %
                (params['name']))
        else:
            # Create MIG
            # template and size are only mandatory on the create path.
            req_create_fields = [
                {'name': 'template', 'required': True, 'type': str},
                {'name': 'size', 'required': True, 'type': int}
            ] # yapf: disable
            (valid_create_fields, valid_create_msg) = _check_params(
                params, req_create_fields)
            if not valid_create_fields:
                module.fail_json(msg=valid_create_msg, changed=False)
            (changed, json_output['created_instances']) = create_mig(gce,
                                                                     params)
            if params['autoscaling'] and params['autoscaling'][
                    'enabled'] is True:
                # Fetch newly-created MIG and create Autoscaler for it.
                mig = get_mig(gce, params['name'], params['zone'])
                if not mig:
                    module.fail_json(
                        msg='Unable to fetch created MIG %s to create \
                        autoscaler in zone: %s' % (
                            params['name'], params['zone']), changed=False)
                if not create_autoscaler(gce, mig, params['autoscaling']):
                    module.fail_json(
                        msg='Unable to fetch MIG %s to create autoscaler \
                        in zone: %s' % (params['name'], params['zone']),
                        changed=False)
                json_output['created_autoscaler'] = True
            # Add named ports if available
            if params['named_ports']:
                # Re-fetch: the MIG object is needed to set named ports.
                mig = get_mig(gce, params['name'], params['zone'])
                if not mig:
                    module.fail_json(
                        msg='Unable to fetch created MIG %s to create \
                        autoscaler in zone: %s' % (
                            params['name'], params['zone']), changed=False)
                json_output['set_named_ports'] = update_named_ports(
                    mig, params['named_ports'])
                if json_output['set_named_ports']:
                    json_output['named_ports'] = params['named_ports']
    elif params['state'] == 'absent':
        # Delete MIG
        # First, check and remove the autoscaler, if present.
        # Note: multiple autoscalers can be associated to a single MIG.  We
        # only handle the one that is named, but we might want to think about this.
        if params['autoscaling']:
            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
                                        params['zone'])
            if not autoscaler:
                module.fail_json(msg='Unable to fetch autoscaler %s to delete \
                in zone: %s' % (params['autoscaling']['name'], params['zone']),
                                changed=False)
            changed = delete_autoscaler(autoscaler)
            json_output['deleted_autoscaler'] = changed
        # Now, delete the MIG.
        (changed, json_output['deleted_instances']) = delete_mig(mig)
    else:
        # Update MIG
        # If we're going to update a MIG, we need a size and template values.
        # If not specified, we use the values from the existing MIG.
        if not params['size']:
            params['size'] = mig.size
        if not params['template']:
            params['template'] = mig.template.name
        if params['template'] != mig.template.name:
            # Update Instance Template.
            new_template = gce.ex_get_instancetemplate(params['template'])
            mig.set_instancetemplate(new_template)
            json_output['updated_instancetemplate'] = True
            changed = True
        if params['recreate_instances'] is True:
            # Recreate Instances.
            (changed, json_output['recreated_instances']
             ) = recreate_instances_in_mig(mig)
        if params['size'] != mig.size:
            # Resize MIG.
            keystr = 'created' if params['size'] > mig.size else 'deleted'
            (changed, json_output['resize_%s_instances' %
                                  (keystr)]) = resize_mig(mig, params['size'])
        # Update Autoscaler
        if params['autoscaling']:
            autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
                                        params['zone'])
            if not autoscaler:
                # Try to create autoscaler.
                # Note: this isn't perfect, if the autoscaler name has changed
                # we wouldn't know that here.
                if not create_autoscaler(gce, mig, params['autoscaling']):
                    module.fail_json(
                        msg='Unable to create autoscaler %s for existing MIG %s\
                        in zone: %s' % (params['autoscaling']['name'],
                                        params['name'], params['zone']),
                        changed=False)
                json_output['created_autoscaler'] = True
                changed = True
            else:
                if params['autoscaling']['enabled'] is False:
                    # Delete autoscaler
                    changed = delete_autoscaler(autoscaler)
                    json_output['delete_autoscaler'] = changed
                else:
                    # Update policy, etc.
                    changed = update_autoscaler(gce, autoscaler,
                                                params['autoscaling'])
                    json_output['updated_autoscaler'] = changed
        # Reconcile named ports on every update run (None means "none").
        named_ports = params['named_ports'] or []
        json_output['updated_named_ports'] = update_named_ports(mig,
                                                                named_ports)
        if json_output['updated_named_ports']:
            json_output['named_ports'] = named_ports
    json_output['changed'] = changed
    json_output.update(params)
    module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 |
GustavoHennig/ansible | lib/ansible/modules/network/nxos/nxos_snmp_community.py | 19 | 8273 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
required: false
default: null
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
required: false
default: null
acl:
description:
- ACL name to filter snmp requests.
required: false
default: 1
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "network-operator"}
existing:
description: k/v pairs of existing snmp community
type: dict
sample: {}
end_state:
description: k/v pairs of snmp community after module execution
returned: always
type: dict or null
sample: {"acl": "None", "group": "network-operator"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a show command on the device and return its parsed output.

    For the 'cli' transport, ' | json' is appended so the device returns
    structured output ('show run' has no JSON form, so it is sent as-is).
    The 'nxapi' transport already returns structured data, so the command
    is sent unmodified.

    :param command: show command to execute
    :param module: AnsibleModule instance (provides transport and connection)
    :param command_type: retained for backward compatibility; unused
    :return: list containing the command output body
    """
    # Fix: the previous implementation left `body` unbound for any transport
    # other than 'cli'/'nxapi' (UnboundLocalError) and duplicated the
    # run_commands call in both branches; the branches are unified here.
    if module.params['transport'] == 'cli' and 'show run' not in command:
        command += ' | json'
    return run_commands(module, [command])
def apply_key_map(key_map, table):
    """Rename keys of *table* according to *key_map*.

    Keys absent from *key_map* (or mapped to a falsy name) are dropped.
    Truthy values are coerced to ``str``; falsy values (0, '', None, ...)
    are kept as-is.
    """
    renamed = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        renamed[new_key] = str(value) if value else value
    return renamed
def flatten_list(command_lists):
    """Flatten one level of nesting: sub-lists are expanded in place,
    non-list items are kept as-is."""
    flattened = []
    for item in command_lists:
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def get_snmp_groups(module):
    """Return the list of SNMP group (role) names configured on the device.

    Returns whatever was parsed so far (possibly an empty list) when the
    device output does not have the expected structure.
    """
    body = execute_show_command('show snmp group', module)[0]
    groups = []
    try:
        for row in body['TABLE_role']['ROW_role']:
            groups.append(row['role_name'])
    except (KeyError, AttributeError):
        # Missing/unexpected structure: fall through with what we have.
        pass
    return groups
def get_snmp_community(module, find_filter=None):
    # Read "show snmp community" and build {community_name: {group, acl}}.
    # With a find_filter, only that community's (whitespace-stripped) entry
    # is returned; note the quirk below: without a filter this function
    # always returns {} even after populating community_dict.
    command = 'show snmp community'
    data = execute_show_command(command, module)[0]
    community_dict = {}
    community_map = {
        'grouporaccess': 'group',
        'aclfilter': 'acl'
    }
    try:
        community_table = data['TABLE_snmp_community']['ROW_snmp_community']
        for each in community_table:
            # Rename device keys to the module's vocabulary (group/acl).
            community = apply_key_map(community_map, each)
            key = each['community_name']
            community_dict[key] = community
    except (KeyError, AttributeError):
        # No community table in the output: nothing configured (or parse
        # failure) — return the empty dict.
        return community_dict
    if find_filter:
        find = community_dict.get(find_filter, None)
    # Quirk: when find_filter is None the whole dict is discarded and {}
    # is returned; all callers in this module pass a community name.
    if find_filter is None or find is None:
        return {}
    else:
        # Strip stray whitespace the device appends to string values.
        fix_find = {}
        for (key, value) in find.items():
            if isinstance(value, str):
                fix_find[key] = value.strip()
            else:
                fix_find[key] = value
        return fix_find
def config_snmp_community(delta, community):
    """Build the config commands that apply *delta* to *community*.

    *delta* maps option names ('group', 'acl') to desired values; option
    names without a known command template are ignored.

    Fix: the original called ``CMDS.get(k).format(...)`` before checking
    for None, so an unexpected key raised AttributeError; the trailing
    ``cmd = None`` was dead code.
    """
    CMDS = {
        'group': 'snmp-server community {0} group {group}',
        'acl': 'snmp-server community {0} use-acl {acl}'
    }
    commands = []
    for option in delta:
        template = CMDS.get(option)
        # Guard the lookup before formatting so unknown options are skipped
        # instead of crashing.
        if template:
            commands.append(template.format(community, **delta))
    return commands
def main():
    # Module entry point: validate parameters, compute the delta between the
    # proposed and existing community configuration, push commands (unless
    # in check mode) and report results via exit_json().
    argument_spec = dict(
        community=dict(required=True, type='str'),
        access=dict(choices=['ro', 'rw']),
        group=dict(type='str'),
        acl=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['access', 'group']],
                           mutually_exclusive=[['access', 'group']],
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    access = module.params['access']
    group = module.params['group']
    community = module.params['community']
    acl = module.params['acl']
    state = module.params['state']
    # 'access' is sugar for the two well-known groups.
    if access:
        if access == 'ro':
            group = 'network-operator'
        elif access == 'rw':
            group = 'network-admin'
    # group check - ensure group being configured exists on the device
    configured_groups = get_snmp_groups(module)
    if group not in configured_groups:
        # NOTE(review): adjacent string literals concatenate without a
        # space — the message reads "...switch.please add...".
        module.fail_json(msg="group not on switch."
                             "please add before moving forward")
    existing = get_snmp_community(module, community)
    args = dict(group=group, acl=acl)
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    # delta = proposed settings not already present on the device.
    delta = dict(set(proposed.items()).difference(existing.items()))
    changed = False
    end_state = existing
    commands = []
    if state == 'absent':
        if existing:
            command = "no snmp-server community {0}".format(community)
            commands.append(command)
        cmds = flatten_list(commands)
    elif state == 'present':
        if delta:
            command = config_snmp_community(dict(delta), community)
            commands.append(command)
        cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching the box.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_snmp_community(module, community)
            # Drop the leading 'configure' command from the reported updates.
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
r8921039/bitcoin | test/functional/feature_minchainwork.py | 33 | 4122 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment),
# i.e. chain work grows by 2 for every block mined in regtest.
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(BitcoinTestFramework):
    """Functional test for -minimumchainwork: blocks below the configured
    chain work must not be relayed across node1 to node2."""
    def set_test_params(self):
        # Three-node chain; node1/node2 require 0x65 (101) units of work
        # before leaving IBD and relaying blocks.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
        self.node_min_work = [0, 101, 101]
    def setup_network(self):
        # This test relies on the chain setup being:
        # node0 <- node1 <- node2
        # Before leaving IBD, nodes prefer to download blocks from outbound
        # peers, so ensure that we're mining on an outbound peer and testing
        # block relay to inbound peers.
        self.setup_nodes()
        for i in range(self.num_nodes-1):
            connect_nodes(self.nodes[i+1], i)
    def run_test(self):
        # Start building a chain on node0. node2 shouldn't be able to sync until node1's
        # minchainwork is exceeded
        starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
        self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
        starting_blockcount = self.nodes[2].getblockcount()
        # Mine just enough blocks to stay *below* node1's minimum chain work.
        num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
        self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
        hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
                                                 self.nodes[0].get_deterministic_priv_key().address)
        self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
        # Sleep a few seconds and verify that node2 didn't get any new blocks
        # or headers. We sleep, rather than sync_blocks(node0, node1) because
        # it's reasonable either way for node1 to get the blocks, or not get
        # them (since they're below node1's minchainwork).
        time.sleep(3)
        self.log.info("Verifying node 2 has no more blocks than before")
        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
        # Node2 shouldn't have any new headers yet, because node1 should not
        # have relayed anything.
        assert_equal(len(self.nodes[2].getchaintips()), 1)
        assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
        assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
        # One more block pushes the chain past node1's threshold, so now
        # everything should propagate.
        self.log.info("Generating one more block")
        self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
        self.log.info("Verifying nodes are all synced")
        # Because nodes in regtest are all manual connections (eg using
        # addnode), node1 should not have disconnected node0. If not for that,
        # we'd expect node1 to have disconnected node0 for serving an
        # insufficient work chain, in which case we'd need to reconnect them to
        # continue the test.
        self.sync_all()
        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Allow running this functional test directly.
if __name__ == '__main__':
    MinimumChainWorkTest().main()
| mit |
dfalt974/SickRage | lib/dateutil/zoneinfo/rebuild.py | 34 | 1719 | import logging
import os
import tempfile
import shutil
import json
from subprocess import check_call
from tarfile import TarFile
from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
    filename is the timezone tarball from ``ftp.iana.org/tz``.
    """
    # NOTE: 'tag' is accepted but unused in this function body.
    # 'zonegroups' has a mutable default, but it is only iterated (never
    # mutated), so the shared default is harmless here.
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        # Extract only the requested zone group files, then compile them
        # with zic(8) into binary tzfiles under zonedir.
        with TarFile.open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
            try:
                check_call(["zic", "-d", zonedir] + filepaths)
            except OSError as e:
                # zic missing is the common failure; give a helpful hint.
                _print_on_nosuchfile(e)
                raise
        # write metadata file
        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
            json.dump(metadata, f, indent=4, sort_keys=True)
        # Repack the compiled zone data next to this module.
        target = os.path.join(moduledir, ZONEFILENAME)
        with TarFile.open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(tmpdir)
def _print_on_nosuchfile(e):
    """Print helpful troubleshooting message

    e is an exception raised by subprocess.check_call()
    """
    import errno

    # Fix: compare against the symbolic errno.ENOENT rather than the magic
    # number 2, which only accidentally matched "no such file or directory".
    if e.errno == errno.ENOENT:
        logging.error(
            "Could not find zic. Perhaps you need to install "
            "libc-bin or some other package that provides it, "
            "or it's not in your PATH?")
| gpl-3.0 |
Akrog/cinder | cinder/tests/targets/test_iser_driver.py | 5 | 3980 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder.tests.targets import test_lio_driver as test_lio
from cinder.tests.targets import test_tgt_driver as test_tgt
from cinder import utils
from cinder.volume.targets import iser
from cinder.volume.targets import lio
from cinder.volume.targets import tgt
class TestIserAdmDriver(test_tgt.TestTgtAdmDriver):
    """Unit tests for the deprecated ISERTgtAdm flow
    """
    def setUp(self):
        # Reuse the TGT test fixture but point the target at the
        # iSER-specific configuration options.
        super(TestIserAdmDriver, self).setUp()
        self.configuration.iser_ip_address = '10.9.8.7'
        self.configuration.iser_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target = iser.ISERTgtAdm(root_helper=utils.get_root_helper(),
                                      configuration=self.configuration)
    @mock.patch.object(iser.ISERTgtAdm, '_get_iscsi_properties')
    def test_initialize_connection(self, mock_get_iscsi):
        # initialize_connection must tag the returned connection info with
        # the 'iser' driver volume type.
        connector = {'initiator': 'fake_init'}
        # Test the normal case
        mock_get_iscsi.return_value = {}
        expected_return = {'driver_volume_type': 'iser',
                           'data': {}}
        self.assertEqual(expected_return,
                         self.target.initialize_connection(self.testvol,
                                                           connector))
    def test_iscsi_protocol(self):
        # The deprecated adm driver still reports the 'iser' protocol.
        self.assertEqual(self.target.iscsi_protocol, 'iser')
class TestIserTgtDriver(test_tgt.TestTgtAdmDriver):
    """Unit tests for the iSER TGT flow
    """
    def setUp(self):
        # Modern configuration path: plain TgtAdm with iscsi_protocol
        # switched to 'iser' instead of the deprecated ISERTgtAdm class.
        super(TestIserTgtDriver, self).setUp()
        self.configuration.iscsi_protocol = 'iser'
        self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
    def test_iscsi_protocol(self):
        self.assertEqual(self.target.iscsi_protocol, 'iser')
    @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties')
    def test_initialize_connection(self, mock_get_iscsi):
        # Connection info must carry driver_volume_type 'iser'.
        connector = {'initiator': 'fake_init'}
        mock_get_iscsi.return_value = {}
        expected_return = {'driver_volume_type': 'iser',
                           'data': {}}
        self.assertEqual(expected_return,
                         self.target.initialize_connection(self.testvol,
                                                           connector))
class TestIserLioAdmDriver(test_lio.TestLioAdmDriver):
    """Unit tests for the iSER LIO flow
    """
    def setUp(self):
        super(TestIserLioAdmDriver, self).setUp()
        self.configuration.iscsi_protocol = 'iser'
        # _verify_rtstool shells out to rtstool; patch it so the fixture
        # can build a LioAdm without the binary installed.
        with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
            self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                     configuration=self.configuration)
        # Stub the db so provider_auth lookups succeed without a database.
        self.target.db = mock.MagicMock(
            volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
    def test_iscsi_protocol(self):
        self.assertEqual(self.target.iscsi_protocol, 'iser')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_iscsi_properties')
    def test_initialize_connection(self, mock_get_iscsi, mock_execute):
        # LIO path also reports the 'iser' driver volume type.
        connector = {'initiator': 'fake_init'}
        mock_get_iscsi.return_value = {}
        ret = self.target.initialize_connection(self.testvol, connector)
        driver_volume_type = ret['driver_volume_type']
        self.assertEqual(driver_volume_type, 'iser')
| apache-2.0 |
pbuehler/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/old/ComparePeriodsTriggerToMB.py | 42 | 8443 | #! /usr/bin/env python
from ROOT import TCanvas, TGraphErrors, TLegend, TPaveText
from ROOT import kBlack, kBlue, kRed
from Helper import Frame, ReadHistList
from Graphics import Style
from SpectrumContainer import DataContainer
from copy import deepcopy
class PeriodComparisonPlot:
    # Two-pad ROOT canvas: left pad shows the min-bias and triggered
    # spectra, right pad shows the triggered/min-bias ratios.
    def __init__(self):
        self.__comparisons = []     # TriggerComparison objects to plot
        self.__canvas = None        # TCanvas, created lazily in Draw()
        self.__frames = {}          # axis frames keyed "Spectra"/"Ratios"
        self.__legend = None        # TLegend, created in Draw()
    def AddComparison(self, comp):
        self.__comparisons.append(comp)
    def SetPlotRange(self, min ,max):
        # Propagate the pt range to every registered comparison.
        for comp in self.__comparisons:
            comp.SetPlotRange(min, max)
    def Draw(self):
        # Build the canvas; all comparisons share one trigger name, so the
        # first entry is used for titles and labels.
        self.__canvas = TCanvas("comparison%s" %(self.__comparisons[0].GetTriggerName()), "Comparison of different periods for trigger %s" %(self.__comparisons[0].GetTriggerName()), 1000, 600)
        self.__canvas.Divide(2,1)
        self.__legend = TLegend(0.15, 0.15, 0.45, 0.45)
        self.__legend.SetBorderSize(0)
        self.__legend.SetFillStyle(0)
        self.__legend.SetTextFont(42)
        # Left pad: log-log spectra.
        specpad = self.__canvas.cd(1)
        specpad.SetGrid(False,False)
        specpad.SetLogx(True)
        specpad.SetLogy(True)
        self.__frames["Spectra"] = Frame("axisSpec%s" %(self.__comparisons[0].GetTriggerName()), 0, 100, 1e-10, 100)
        self.__frames["Spectra"].SetXtitle("p_{t} (GeV/c)")
        self.__frames["Spectra"].SetYtitle("1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
        self.__frames["Spectra"].Draw()
        # Min-bias reference is drawn once (it is shared by all periods).
        self.__comparisons[0].DrawMinBiasSpectrum()
        self.__comparisons[0].AddMBtoLegend(self.__legend)
        for comp in sorted(self.__comparisons):
            comp.DrawTriggeredSpectrum()
            comp.AddTriggeredSpectrumToLegend(self.__legend)
        self.__legend.Draw()
        # Keep a reference to the label so ROOT does not delete it.
        self.__label = self.__comparisons[0].CreateLabel(0.5, 0.75, 0.89, 0.85)
        self.__label.Draw()
        # Right pad: linear ratios.
        rpad = self.__canvas.cd(2)
        rpad.SetGrid(False, False)
        self.__frames["Ratios"] = Frame("axisRatio%s" %(self.__comparisons[0].GetTriggerName()), 0, 100, 0, 2000)
        self.__frames["Ratios"].SetXtitle("p_{t} (GeV/c)")
        self.__frames["Ratios"].SetYtitle("%s / Min. Bias" %(self.__comparisons[0].GetTriggerName()))
        self.__frames["Ratios"].Draw()
        for comp in sorted(self.__comparisons):
            comp.DrawRatioTriggeredMinBias()
        self.__canvas.cd()
    def SaveAs(self, filenamebase):
        """
        Save plot as image file
        """
        types = ["eps", "pdf", "jpeg", "gif", "png"]
        for t in types:
            self.__canvas.SaveAs("%s.%s" %(filenamebase, t))
class TriggerComparison:
    # Bundles one period's triggered spectrum with the shared min-bias
    # spectrum and their ratio, plus drawing/legend helpers.
    def __init__(self, trgspec, mbspec, triggername, dataname):
        self.__triggeredspectrum = trgspec
        self.__minbiasspectrum = mbspec
        # Ratio inherits the triggered spectrum's drawing style.
        self.__ratiospectra = self.__triggeredspectrum.MakeRatio(self.__minbiasspectrum)
        self.__ratiospectra.SetStyle(self.__triggeredspectrum.GetStyle())
        self.__triggername = triggername
        self.__dataname = dataname
    def __cmp__(self, other):
        # Order comparisons alphabetically by data (period) name.
        # NOTE(review): __cmp__ is Python-2-only; sorted() in
        # PeriodComparisonPlot.Draw would need __lt__ under Python 3.
        othername = other.GetDataName()
        if self.__dataname == othername:
            return 0
        elif self.__dataname < othername:
            return -1
        else:
            return 1
    def SetPlotRange(self, min, max):
        # Apply the same pt window to spectrum, reference and ratio.
        self.__triggeredspectrum.SetPlotRange(min, max)
        self.__minbiasspectrum.SetPlotRange(min, max)
        self.__ratiospectra.SetPlotRange(min, max)
    def GetTriggerName(self):
        return self.__triggername
    def GetDataName(self):
        return self.__dataname
    def DrawTriggeredSpectrum(self):
        self.__triggeredspectrum.Draw()
    def DrawMinBiasSpectrum(self):
        self.__minbiasspectrum.Draw()
    def DrawRatioTriggeredMinBias(self):
        self.__ratiospectra.Draw()
    def AddMBtoLegend(self, leg):
        self.__minbiasspectrum.AddToLegend(leg, "MinBias")
    def AddTriggeredSpectrumToLegend(self, leg):
        self.__triggeredspectrum.AddToLegend(leg, self.__dataname)
    def CreateLabel(self, xmin, ymin, xmax, ymax):
        # Frameless text box naming the trigger, in NDC coordinates.
        label = TPaveText(xmin, ymin, xmax, ymax, "NDC")
        label.SetBorderSize(0)
        label.SetFillStyle(0)
        label.SetTextFont(42)
        label.AddText("Trigger: %s" %(self.__triggername))
        return label
class GraphicsObject:
    """Base wrapper pairing a histogram with a drawing style and plot range.

    Subclasses (Spectrum, Ratio) reuse the lazy TGraphErrors conversion
    implemented in Draw().
    """
    def __init__(self, data, name):
        self._data = data                       # underlying histogram
        self._graphics = None                   # lazily-built TGraphErrors
        self._style = Style(kBlack, 20)         # default: black full circles
        self._plotrange = {"Min":None, "Max":None}
        self._name = name
    def SetPlotRange(self, min, max):
        # BUGFIX: the previous code did self._plotrange[min] = min /
        # self._plotrange[max] = max, keying the dict by the *values*, so
        # Draw() (which reads "Min"/"Max") never saw the requested range.
        self._plotrange["Min"] = min
        self._plotrange["Max"] = max
    def SetStyle(self, style):
        self._style = style
    def SetName(self, name):
        self._name = name
    def GetData(self):
        return self._data
    def GetGraphics(self):
        return self._graphics
    def GetStyle(self):
        return self._style
    def Draw(self):
        # Convert the histogram into a graph once, restricted to the
        # configured plot range, then draw the points on the current pad.
        if not self._graphics:
            self._graphics = TGraphErrors()
            np = 0
            for bin in range(1, self._data.GetXaxis().GetNbins()+1):
                if self._plotrange["Min"] and self._data.GetXaxis().GetBinLowEdge(bin) < self._plotrange["Min"]:
                    continue
                if self._plotrange["Max"] and self._data.GetXaxis().GetBinUpEdge(bin) > self._plotrange["Max"]:
                    break
                self._graphics.SetPoint(np, self._data.GetXaxis().GetBinCenter(bin), self._data.GetBinContent(bin))
                self._graphics.SetPointError(np, self._data.GetXaxis().GetBinWidth(bin)/2., self._data.GetBinError(bin))
                np = np + 1
        self._graphics.SetMarkerColor(self._style.GetColor())
        self._graphics.SetLineColor(self._style.GetColor())
        self._graphics.SetMarkerStyle(self._style.GetMarker())
        self._graphics.Draw("epsame")
    def AddToLegend(self, legend, title = None):
        # Only add an entry once the graph exists (i.e. after Draw()).
        if self._graphics:
            tit = self._name
            if title:
                tit = title
            legend.AddEntry(self._graphics, tit, "lep")
class Spectrum(GraphicsObject):
    # Graphics wrapper for a pt spectrum; can build the ratio to another
    # spectrum (used for triggered / min-bias).
    def __init__(self, data, name):
        GraphicsObject.__init__(self, data, name)
    def MakeRatio(self, denominator):
        # Divide a copy of our histogram by the denominator's histogram so
        # the original data stays untouched; the ratio inherits our plot
        # range when one was configured.
        result = deepcopy(self._data)
        result.Divide(denominator.GetData())
        ratio = Ratio(result)
        if self._plotrange["Min"] or self._plotrange["Max"]:
            ratio.SetPlotRange(self._plotrange["Min"], self._plotrange["Max"])
        return ratio
class Ratio(GraphicsObject):
    """Graphics wrapper for a ratio histogram (e.g. triggered / min-bias)."""
    def __init__(self, data, name = None):
        # A Ratio adds no state of its own; it only tags the data as a
        # ratio for callers, so delegate straight to the base class.
        GraphicsObject.__init__(self, data, name)
def ReadSpectra(filename, trigger):
    """
    Read the event and track histograms for one trigger class from the
    ROOT file and wrap them in a DataContainer.
    """
    hlist = ReadHistList(filename, "PtEMCalTriggerTask")
    return DataContainer(eventHist = hlist.FindObject("hEventHist%s" %(trigger)), trackHist = hlist.FindObject("hTrackHist%s" %(trigger)))
def MakeNormalisedSpectrum(inputdata, name):
    """
    Normalise spectrum by the number of events and by the bin width
    """
    # Standard event selection: |z_vtx| < 10 cm, pileup rejection, and
    # track cut set 1, then project onto the pt axis.
    inputdata.SetVertexRange(-10., 10.)
    inputdata.SetPileupRejection(True)
    inputdata.SelectTrackCuts(1)
    return inputdata.MakeProjection(0, "ptSpectrum%s" %(name), "p_{t} (GeV/c)", "1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
def ComparePeriods(filea, fileb, filemb, namea, nameb, trigger):
    """Build and draw the comparison of two data periods against min-bias.

    filea/fileb are the triggered-period ROOT files, filemb supplies the
    min-bias reference; namea/nameb label the periods.  Returns the drawn
    PeriodComparisonPlot (keep the reference so ROOT keeps the canvas).

    Fix: removed the unused local ``triggers = {}``.
    """
    dataA = ReadSpectra(filea, trigger)
    dataB = ReadSpectra(fileb, trigger)
    dataMB = ReadSpectra(filemb, "MinBias")
    # Distinct marker styles/colors per period; black for the reference.
    specA = Spectrum(MakeNormalisedSpectrum(dataA, namea), namea)
    specA.SetStyle(Style(kBlue, 24))
    specB = Spectrum(MakeNormalisedSpectrum(dataB, nameb), nameb)
    specB.SetStyle(Style(kRed, 25))
    specMB = Spectrum(MakeNormalisedSpectrum(dataMB, "MinBias"), "MinBias")
    specMB.SetStyle(Style(kBlack, 25))
    plot = PeriodComparisonPlot()
    plot.AddComparison(TriggerComparison(specA, specMB, trigger, namea))
    plot.AddComparison(TriggerComparison(specB, specMB, trigger, nameb))
    plot.SetPlotRange(2., 100.)
    plot.Draw()
    return plot
| bsd-3-clause |
Pulshen/XKernel | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and [interval] seconds
# between display refreshes (default 3).
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # Single argument: an integer is the interval, anything else is a
    # command name to filter on.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> hit count, reset after every display refresh.
syscalls = autodict()
def trace_begin():
    # Called once by perf before event processing starts; kick off the
    # background thread that periodically prints the totals.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # perf event handler: count one hit for syscall 'id', optionally
    # restricted to events from the command named on the command line.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    # First hit initialises the (autodict) slot to 1.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Background thread body: every 'interval' seconds clear the terminal,
    # print the per-syscall counts (most frequent first), then reset the
    # counters.  Python 2 print-statement syntax throughout.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        # Sort by count (then id), descending.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
katyhuff/moose | gui/utils/mergeLists.py | 34 | 1682 | #!/usr/bin/python
''' Merge the lists together preserving the ordering of master. "master" will be the merged list after completion. '''
def mergeLists(master, slave):
    '''Merge slave's items into master in place, preserving master's order.

    Each item of slave missing from master is inserted at the position
    that best preserves slave's relative ordering: every possible
    insertion point is scored by how many of the item's slave-neighbours
    sit on the correct side, weighted by proximity.  "master" is the
    merged list after completion.

    Fix: replaced Python-2-only xrange() with range() so the function
    also runs under Python 3 (range is valid on both).
    '''
    for slave_value_position in range(len(slave)):
        value = slave[slave_value_position]
        if value in master:
            continue
        # Split slave around the new value: these slices say which items
        # should precede / follow it in the merged result.
        slave_before_slice = slave[0:slave_value_position]
        slave_after_slice = slave[slave_value_position+1:]
        best_score = 0
        best_position = 0
        for master_position in range(len(master)+1):
            current_score = 0
            master_before_slice = master[0:master_position]
            master_after_slice = master[master_position:]
            # Reward this candidate position for each slave-neighbour found
            # on the correct side, weighted by closeness to the new value.
            for slave_slice_position in range(len(slave_before_slice)):
                slave_value = slave_before_slice[slave_slice_position]
                if slave_value in master_before_slice:
                    current_score += 1.0/float(len(slave_before_slice)-slave_slice_position+1)
            for slave_slice_position in range(len(slave_after_slice)):
                slave_value = slave_after_slice[slave_slice_position]
                if slave_value in master_after_slice:
                    current_score += 1.0/float(slave_slice_position+1)
            if current_score > best_score:
                best_position = master_position
                best_score = current_score
        master.insert(best_position,value)
if __name__ == '__main__':
    # Quick manual check: merge a MOOSE input-block ordering into a
    # fuller template ordering (Python 2 print statement below).
    input = ['Variables','Functions','Kernels','BCs','Executioner','Outputs']
#    input = []
    template = ['Variables','AuxVariables','Kernels','AuxKernels','BCs','AuxBCs','Postprocessors','Executioner','Outputs']
#    template = ['Variables','AuxVariables']
#    template = []
    mergeLists(input, template)
    print input
| lgpl-2.1 |
kivy/kivy | kivy/core/image/img_tex.py | 21 | 1549 | '''
Tex: Compressed texture
'''
# Public API of this module.
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
    """Loader for kivy's ".tex" compressed-texture container.

    Layout: a 4-byte "KTEX" magic, a uint32 header size, a JSON header
    (image_size, texture_size, format, datalen) and the raw payload.
    """

    @staticmethod
    def extensions():
        # File extensions dispatched to this loader.
        return ('tex', )

    def load(self, filename):
        """Parse *filename* and return a single-element list of ImageData.

        Fixes over the previous version:
        - the file is opened with a context manager, so the handle is no
          longer leaked (it was never closed, even on success);
        - the magic is compared against the bytes literal b'KTEX' — the
          file is opened in binary mode, so read() returns bytes on
          Python 3 and the old str comparison could never match;
        - the bare ``except:`` no longer swallows KeyboardInterrupt/
          SystemExit;
        - the dead triple-quoted mipmap code block was removed.
        """
        try:
            with open(filename, 'rb') as fd:
                if fd.read(4) != b'KTEX':
                    raise Exception('Invalid tex identifier')
                headersize = unpack('I', fd.read(4))[0]
                header = fd.read(headersize)
                if len(header) != headersize:
                    raise Exception('Truncated tex header')
                info = json.loads(header.decode('utf-8'))
                data = fd.read()
                if len(data) != info['datalen']:
                    raise Exception('Truncated tex data')
        except Exception:
            Logger.warning('Image: Image <%s> is corrupted' % filename)
            raise

        width, height = info['image_size']
        tw, th = info['texture_size']
        images = [data]
        im = ImageData(width, height, str(info['format']), images[0],
                       source=filename)
        return [im]
# Register this loader with the image-loader factory so ".tex" files are
# dispatched here.
ImageLoader.register(ImageLoaderTex)
| mit |
Nowheresly/odoo | addons/contacts/__openerp__.py | 260 | 1594 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest (__openerp__.py): a bare dict literal that the
# framework evaluates to learn the module's metadata and data files.
{
    'name': 'Address Book',
    'version': '1.0',
    'category': 'Tools',
    'description': """
This module gives you a quick view of your address book, accessible from your home page.
You can track your suppliers, customers and other contacts.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    'summary': 'Contacts, People and Companies',
    # Modules that must be installed first.
    'depends': [
        'mail',
    ],
    # XML views loaded on install/update.
    'data': [
        'contacts_view.xml',
    ],
    'installable': True,
    'application': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2014c2g14/2014c2 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/tempfile.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
# Names exported by "from tempfile import *".
__all__ = [
    "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
    "SpooledTemporaryFile", "TemporaryDirectory",
    "mkstemp", "mkdtemp", # low level safe interfaces
    "mktemp", # deprecated unsafe interface
    "TMP_MAX", "gettempprefix", # constants
    "tempdir", "gettempdir"
    ]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
# Platforms without fcntl (e.g. Windows or the Brython sandbox) get a no-op
# _set_cloexec; elsewhere it marks the descriptor close-on-exec so children
# do not inherit temp-file handles.
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        pass
else:
    def _set_cloexec(fd):
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except OSError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
# Locking primitive: fall back to the dummy implementation when real
# threads are unavailable.
try:
    import _thread
except ImportError:
    import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
# Open flags for temp files: create exclusively, and add the optional
# platform flags (no handle inheritance, no symlink following, binary).
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    _text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    _bin_openflags |= _os.O_BINARY
# Maximum number of names tried before giving up.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
# Pick the cheapest available "does this path exist" probe.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises OSError if the
    # file doesn't exist.
    def _stat(fn):
        f = open(fn)
        f.close()
def _exists(fn):
    """Return True when *fn* can be stat'ed, False otherwise."""
    try:
        _stat(fn)
    except OSError:
        return False
    return True
class _RandomNameSequence:
    """Endless iterator yielding unpredictable six-character strings that
    are safe to embed in file names.  A single instance may be shared
    between threads; the RNG is re-created after fork() so parent and
    child do not emit the same sequence."""

    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"

    @property
    def rng(self):
        # Lazily (re)seed a Random whenever the pid changes.
        pid = _os.getpid()
        if getattr(self, '_rng_pid', None) != pid:
            self._rng = _Random()
            self._rng_pid = pid
        return self._rng

    def __iter__(self):
        return self

    def __next__(self):
        pick = self.rng.choice
        alphabet = self.characters
        return ''.join([pick(alphabet) for _ in "123456"])
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try."""
    candidates = []

    # Environment variables take priority...
    for envvar in ('TMPDIR', 'TEMP', 'TMP'):
        value = _os.getenv(envvar)
        if value:
            candidates.append(value)

    # ...followed by the conventional OS-specific locations...
    if _os.name == 'nt':
        candidates += [r'c:\temp', r'c:\tmp', r'\temp', r'\tmp']
    else:
        candidates += ['/tmp', '/var/tmp', '/usr/tmp']

    # ...and the current directory as a last resort.
    try:
        candidates.append(_os.getcwd())
    except (AttributeError, OSError):
        candidates.append(_os.curdir)

    return candidates
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.
    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # Exclusive create; the nested finallys guarantee the fd is
                # closed and the probe file removed whatever happens.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                # Name collision: try another random name in this directory.
                pass
            except OSError:
                break   # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
# Shared name generator, created lazily by _get_candidate_names().
_name_sequence = None
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    global _name_sequence
    # Double-checked creation under _once_lock: test once cheaply, then
    # re-test while holding the lock before constructing the singleton.
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
    # Try up to TMP_MAX random names; O_EXCL in 'flags' makes creation
    # atomic, so a collision shows up as FileExistsError.
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            fd = _os.open(file, flags, 0o600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == 'nt':
                continue
            else:
                raise
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempdir.template."""
    # Returns the module-level 'template' prefix ("tmp").
    return template
# Default temp directory; resolved lazily (and only once) by gettempdir().
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    # Same double-checked pattern as _get_candidate_names(): probe the
    # filesystem at most once, under _once_lock.
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """Create a unique temporary file and return ``(fd, absolute_path)``.

    *suffix* and *prefix* decorate the generated name; *dir* overrides
    the default temporary directory.  With *text* true the file is opened
    in text mode, otherwise (the default) in binary mode; on some
    operating systems this makes no difference.

    The file is readable and writable only by the creating user, is
    executable by no one, and its descriptor is not inherited by child
    processes.  The caller is responsible for deleting the file when done
    with it.
    """
    chosen_dir = dir if dir is not None else gettempdir()
    open_flags = _text_openflags if text else _bin_openflags
    return _mkstemp_inner(chosen_dir, prefix, suffix, open_flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """Create a uniquely named temporary directory and return its path.

    Arguments are as for mkstemp(), except that the 'text' argument is
    not accepted.  The directory is readable, writable, and searchable
    only by the creating user (mode 0o700).  The caller is responsible
    for deleting the directory when done with it.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for _ in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(names) + suffix)
        try:
            _os.mkdir(candidate, 0o700)
        except FileExistsError:
            # Name collision -- try the next candidate.
            continue
        return candidate
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """Return a unique candidate name for a temporary file.

    The file itself is NOT created.  Arguments are as for mkstemp(),
    except that the 'text' argument is not accepted.

    WARNING: this function is inherently racy and should not be used.
    The returned name refers to a file that did not exist at some point,
    but another process may create it before the caller does.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for _ in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(names) + suffix)
        if not _exists(candidate):
            return candidate
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper
    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        # file: the underlying (already open) file object
        # name: filesystem path of the temporary file
        # delete: whether to remove the file from disk on close
        # close_called: guards against double close/unlink
        self.file = file
        self.name = name
        self.close_called = False
        self.delete = delete
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        # Access via __dict__ to avoid recursing back into __getattr__.
        file = self.__dict__['file']
        a = getattr(file, name)
        if not isinstance(a, int):
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        return iter(self.file)
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink
        def close(self):
            # Close the file and, if requested, remove it from disk.
            # Safe to call more than once.
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)
        def __del__(self):
            self.close()
        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        # On Windows the OS deletes the file at close (O_TEMPORARY),
        # so only the underlying file's __exit__ needs forwarding.
        def __exit__(self, exc, value, tb):
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.
    Returns an object with a file-like interface; the name of the file
    is accessible as file.name. The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """
    if dir is None:
        dir = gettempdir()
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed. This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    try:
        file = _io.open(fd, mode, buffering=buffering,
                        newline=newline, encoding=encoding)
        return _TemporaryFileWrapper(file, name, delete)
    except BaseException:
        # Bug fix: if wrapping the descriptor fails (e.g. an invalid
        # 'mode' or 'encoding'), remove the just-created file and close
        # the descriptor instead of leaking both.  Mirrors the cleanup
        # done by the POSIX TemporaryFile below.
        _os.unlink(name)
        _os.close(fd)
        raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.
        Returns an object with a file-like interface. The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()
        flags = _bin_openflags
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the open descriptor keeps the file
            # alive, making it anonymous and self-destructing on close.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            # Bare except is deliberate: whatever went wrong, do not
            # leak the descriptor; the exception is always re-raised.
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    # Class-level default so rollover() is safe even if __init__ failed early.
    _rolled = False
    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix="", prefix=template, dir=None):
        # Start with an in-memory buffer; binary vs. text follows 'mode'.
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Saved so rollover() can lazily create the real file with the
        # exact arguments the caller asked for; deleted after rollover.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}
    def _check(self, file):
        # Roll over to a real file once the spooled data exceeds max_size
        # (a max_size of 0 disables the check entirely).
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()
    def rollover(self):
        # Move the buffered contents into a real temporary file,
        # preserving both data and stream position.  Idempotent.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)
        self._rolled = True
    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file. So we list
    # all the methods directly.
    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self
    def __exit__(self, exc, value, tb):
        self._file.close()
    # file protocol
    def __iter__(self):
        return self._file.__iter__()
    def close(self):
        self._file.close()
    @property
    def closed(self):
        return self._file.closed
    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # Binary buffers have no encoding at all; a text buffer only
            # gains one after rollover, so report the requested encoding.
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']
    def fileno(self):
        # An OS-level descriptor requires a real file: force rollover.
        self.rollover()
        return self._file.fileno()
    def flush(self):
        self._file.flush()
    def isatty(self):
        return self._file.isatty()
    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            # In-memory buffers have no 'mode'; report the requested one.
            return self._TemporaryFileArgs['mode']
    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # No filesystem name until the data has rolled over.
            return None
    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']
    def read(self, *args):
        return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
    def readlines(self, *args):
        return self._file.readlines(*args)
    def seek(self, *args):
        self._file.seek(*args)
    @property
    def softspace(self):
        return self._file.softspace
    def tell(self):
        return self._file.tell()
    def truncate(self, size=None):
        if size is None:
            self._file.truncate()
        else:
            # Truncating beyond max_size must happen on a real file.
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)
    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv
    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager. For
    example:
    with TemporaryDirectory() as tmpdir:
    ...
    Upon exiting the context, the directory and everything contained
    in it are removed.
    """
    def __init__(self, suffix="", prefix=template, dir=None):
        self._closed = False
        self.name = None # Handle mkdtemp raising an exception
        self.name = mkdtemp(suffix, prefix, dir)
    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)
    def __enter__(self):
        return self.name
    def cleanup(self, _warn=False):
        # Remove the directory tree; safe to call more than once.
        # With _warn=True a ResourceWarning is issued, used by __del__
        # to flag directories that were never cleaned up explicitly.
        if self.name and not self._closed:
            try:
                self._rmtree(self.name)
            except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning on stderr
                # if the directory could not be cleaned
                # up due to missing globals
                if "None" not in str(ex):
                    raise
                print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
                      file=_sys.stderr)
                return
            self._closed = True
            if _warn:
                self._warn("Implicitly cleaning up {!r}".format(self),
                           ResourceWarning)
    def __exit__(self, exc, value, tb):
        self.cleanup()
    def __del__(self):
        # Issue a ResourceWarning if implicit cleanup needed
        self.cleanup(_warn=True)
    # XXX (ncoghlan): The following code attempts to make
    # this class tolerant of the module nulling out process
    # that happens during CPython interpreter shutdown
    # Alas, it doesn't actually manage it. See issue #10188
    _listdir = staticmethod(_os.listdir)
    _path_join = staticmethod(_os.path.join)
    _isdir = staticmethod(_os.path.isdir)
    _islink = staticmethod(_os.path.islink)
    _remove = staticmethod(_os.remove)
    _rmdir = staticmethod(_os.rmdir)
    _os_error = OSError
    _warn = _warnings.warn
    def _rmtree(self, path):
        # Essentially a stripped down version of shutil.rmtree. We can't
        # use globals because they may be None'ed out at shutdown.
        for name in self._listdir(path):
            fullname = self._path_join(path, name)
            try:
                # Do not follow symlinks: only recurse into real dirs.
                isdir = self._isdir(fullname) and not self._islink(fullname)
            except self._os_error:
                isdir = False
            if isdir:
                self._rmtree(fullname)
            else:
                try:
                    self._remove(fullname)
                except self._os_error:
                    # Best-effort cleanup: ignore files we cannot remove.
                    pass
        try:
            self._rmdir(path)
        except self._os_error:
            pass
| gpl-2.0 |
jiajiechen/mxnet | example/rcnn/rcnn/tools/reeval.py | 16 | 2329 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import mxnet as mx
from ..logger import logger
from ..config import config, default, generate_config
from ..dataset import *
def reeval(args):
    """Re-run detection evaluation from cached results.

    Instantiates the imdb named by ``args.dataset`` / ``args.image_set``,
    loads the pickled detections previously written to the imdb cache
    directory, and invokes the dataset's evaluation routine on them.
    """
    # load imdb
    # NOTE(security): eval() on a CLI-supplied string executes arbitrary
    # code; the dataset name must come from a trusted source only.
    imdb = eval(args.dataset)(args.image_set, args.root_path, args.dataset_path)
    # load detection results
    cache_file = os.path.join(imdb.cache_path, imdb.name, 'detections.pkl')
    # Bug fix: pickle files must be opened in binary mode.  Text mode
    # raises on Python 3 and can corrupt reads on Windows.
    with open(cache_file, 'rb') as f:
        detections = pickle.load(f)
    # eval
    imdb.evaluate_detections(detections)
def parse_args():
    """Build and parse the command line for the re-evaluation tool."""
    parser = argparse.ArgumentParser(description='imdb test')
    # general
    # Network/dataset must be parsed first: they drive generate_config(),
    # which determines the defaults of the remaining options.
    parser.add_argument('--network', help='network name', default=default.network, type=str)
    parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    args, _ = parser.parse_known_args()
    generate_config(args.network, args.dataset)
    for flag, desc, dflt in (('--image_set', 'image_set name', default.image_set),
                             ('--root_path', 'output data folder', default.root_path),
                             ('--dataset_path', 'dataset path', default.dataset_path)):
        parser.add_argument(flag, help=desc, default=dflt, type=str)
    # other
    parser.add_argument('--no_shuffle', help='disable random shuffle', action='store_true')
    return parser.parse_args()
def main():
    """Entry point: parse arguments, log them, and re-run the evaluation."""
    parsed = parse_args()
    logger.info('Called with argument: %s' % parsed)
    reeval(parsed)
if __name__ == '__main__':
main()
| apache-2.0 |
LLNL/spack | var/spack/repos/builtin/packages/py-python-socketio/package.py | 5 | 1037 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonSocketio(PythonPackage):
    """Python implementation of the Socket.IO realtime server."""
    # Project homepage and canonical PyPI source tarball.
    homepage = "https://github.com/miguelgrinberg/python-socketio"
    url = "https://pypi.io/packages/source/p/python-socketio/python-socketio-1.8.4.tar.gz"
    version('1.8.4', sha256='13807ce17e85371d15b31295a43b1fac1c0dba1eb5fc233353a3efd53aa122cc')
    # Optional run-time dependency, on by default.
    variant('eventlet', default=True,
            description="Pulls in optional eventlet dependency, required"
            " for using the zmq implementation.")
    # Build-only and run-time Python dependencies.
    depends_on('py-setuptools', type='build')
    depends_on('py-six@1.9.0:', type=("build", "run"))
    depends_on('py-python-engineio@1.2.1:', type=("build", "run"))
    depends_on('py-eventlet', when='+eventlet', type=("build", "run"))
rogerscristo/BotFWD | env/lib/python3.6/site-packages/pytests/test_inputtextmessagecontent.py | 1 | 3394 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import InputTextMessageContent, InputMessageContent, ParseMode
@pytest.fixture()
def json_dict():
    """Raw Telegram-API-style dict mirroring the test class attributes."""
    cls = TestInputTextMessageContent
    return {
        'parse_mode': cls.parse_mode,
        'message_text': cls.message_text,
        'disable_web_page_preview': cls.disable_web_page_preview,
    }
@pytest.fixture(scope='class')
def input_text_message_content():
    """A fully populated InputTextMessageContent shared across the class."""
    cls = TestInputTextMessageContent
    return InputTextMessageContent(
        cls.message_text,
        parse_mode=cls.parse_mode,
        disable_web_page_preview=cls.disable_web_page_preview)
class TestInputTextMessageContent:
    """Tests JSON (de)serialization of InputTextMessageContent."""
    # Reference values shared by the fixtures above and the assertions below.
    message_text = '*message text*'
    parse_mode = ParseMode.MARKDOWN
    disable_web_page_preview = True
    def test_de_json(self, json_dict, bot):
        """de_json must restore every field from the raw dict."""
        input_text_message_content_json = InputTextMessageContent.de_json(json_dict, bot)
        assert input_text_message_content_json.parse_mode == self.parse_mode
        assert input_text_message_content_json.message_text == self.message_text
        assert input_text_message_content_json.disable_web_page_preview == \
            self.disable_web_page_preview
    def test_input_text_message_content_json_de_json_factory(self, json_dict, bot):
        """The base-class factory must dispatch to the text subclass."""
        input_text_message_content_json = InputMessageContent.de_json(json_dict, bot)
        assert isinstance(input_text_message_content_json, InputTextMessageContent)
    def test_de_json_factory_without_required_args(self, json_dict, bot):
        """Without 'message_text' the factory must return None, not raise."""
        del (json_dict['message_text'])
        input_text_message_content_json = InputMessageContent.de_json(json_dict, bot)
        assert input_text_message_content_json is None
    def test_to_json(self, input_text_message_content):
        """to_json output must be valid JSON (round-trips through loads)."""
        json.loads(input_text_message_content.to_json())
    def test_to_dict(self, input_text_message_content):
        """to_dict must expose all fields under their wire names."""
        input_text_message_content_dict = input_text_message_content.to_dict()
        assert isinstance(input_text_message_content_dict, dict)
        assert input_text_message_content_dict['message_text'] == \
            input_text_message_content.message_text
        assert input_text_message_content_dict['parse_mode'] == \
            input_text_message_content.parse_mode
        assert input_text_message_content_dict['disable_web_page_preview'] == \
            input_text_message_content.disable_web_page_preview
| mit |
BT-ojossen/sale-workflow | __unported__/sale_delivery_term/sale.py | 34 | 14224 | # -*- coding: utf-8 -*-
#
#
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.osv import fields, orm
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class sale_delivery_term(orm.Model):
    """Delivery term: splits a sale order quantity over several
    deliveries, each line holding a percentage and a lead time."""
    _name = 'sale.delivery.term'
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'line_ids': fields.one2many(
            'sale.delivery.term.line', 'term_id', 'Lines', required=True),
        'company_id': fields.many2one(
            'res.company', 'Company', required=True, select=1),
    }
    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get(
            'res.company')._company_default_get(
            cr, uid, 'sale.delivery.term', context=c),
    }
    def is_total_percentage_correct(self, cr, uid, term_ids, context=None):
        """Return True iff every term's line percentages sum to 100%.

        Bug fix: uses a small tolerance instead of exact equality.
        quantity_perc values are binary floats, so e.g. five lines of
        0.2 do not sum to exactly 1.0 and the old `total != 1` test
        wrongly rejected valid terms.
        """
        for term in self.browse(cr, uid, term_ids, context=context):
            total = sum(line.quantity_perc for line in term.line_ids)
            if abs(total - 1.0) > 1e-6:
                return False
        return True
class sale_delivery_term_line(orm.Model):
    """One slice of a delivery term: a quantity percentage plus the
    lead time (in days) for that slice of the order."""
    _name = 'sale.delivery.term.line'
    # Lines have no name of their own; display the parent term's name.
    _rec_name = 'term_id'
    _columns = {
        'term_id': fields.many2one(
            'sale.delivery.term', 'Term', ondelete='cascade'),
        # Stored as a fraction, not a percentage: 20% is 0.2.
        'quantity_perc': fields.float(
            'Quantity percentage', required=True,
            help="For 20% set '0.2'",
            digits_compute=dp.get_precision('Sale Delivery Term')),
        'delay': fields.float(
            'Delivery Lead Time', required=True,
            help="Number of days between the order confirmation and the "
                 "shipping of the products to the customer"),
    }
class sale_order_line_master(orm.Model):
    """Master sale order line.

    Holds the total quantity for a product together with a delivery
    term; generate_detailed_lines() splits it into ordinary
    sale.order.line records, one per delivery term line, each with its
    own quantity share, delay and picking group.
    """
    def _clean_on_change_dict(self, res_dict):
        """Adapt a sale.order.line on_change result to this model.

        Drops keys that do not exist here (delay, th_weight, type) and
        renames 'tax_id' to our 'tax_ids' field.  Mutates and returns
        res_dict.
        """
        if 'delay' in res_dict['value']:
            del res_dict['value']['delay']
        if 'th_weight' in res_dict['value']:
            del res_dict['value']['th_weight']
        if 'type' in res_dict['value']:
            del res_dict['value']['type']
        if 'tax_id' in res_dict['value']:
            res_dict['value']['tax_ids'] = res_dict['value']['tax_id']
            del res_dict['value']['tax_id']
        return res_dict
    def product_id_change(
            self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False,
            fiscal_position=False, flag=False, context=None
    ):
        """Delegate to sale.order.line's on_change, then clean the result."""
        res = self.pool.get(
            'sale.order.line').product_id_change(
                cr, uid, ids, pricelist, product, qty=qty,
                uom=uom, qty_uos=qty_uos, uos=uos, name=name,
                partner_id=partner_id,
                lang=lang, update_tax=update_tax, date_order=date_order,
                packaging=packaging, fiscal_position=fiscal_position,
                flag=flag, context=context)
        return self._clean_on_change_dict(res)
    def product_uom_change(
            self, cursor, user, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, context=None
    ):
        """Delegate to sale.order.line's UoM on_change, then clean the result."""
        res = self.pool.get(
            'sale.order.line').product_uom_change(
                cursor, user, ids, pricelist, product, qty=qty,
                uom=uom, qty_uos=qty_uos, uos=uos, name=name,
                partner_id=partner_id,
                lang=lang, update_tax=update_tax, date_order=date_order,
                context=context)
        return self._clean_on_change_dict(res)
    def product_packaging_change(
            self, cr, uid, ids, pricelist, product, qty=0, uom=False,
            partner_id=False, packaging=False, flag=False, context=None
    ):
        """Delegate the packaging on_change unchanged to sale.order.line."""
        return self.pool.get('sale.order.line').product_packaging_change(
            cr, uid, ids, pricelist, product, qty=qty, uom=uom,
            partner_id=partner_id, packaging=packaging, flag=flag,
            context=context)
    def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: tax-excluded subtotal per master line,
        rounded in the order pricelist's currency."""
        tax_obj = self.pool.get('account.tax')
        cur_obj = self.pool.get('res.currency')
        res = {}
        if context is None:
            context = {}
        for line in self.browse(cr, uid, ids, context=context):
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = tax_obj.compute_all(
                cr, uid, line.tax_ids, price,
                line.product_uom_qty, line.order_id.partner_invoice_id.id,
                line.product_id, line.order_id.partner_id)
            cur = line.order_id.pricelist_id.currency_id
            res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
        return res
    def _get_uom_id(self, cr, uid, *args):
        # Reuse the default UoM of ordinary sale order lines.
        # NOTE(review): *args is forwarded as a single tuple, matching the
        # original code -- confirm the delegate ignores its extra args.
        return self.pool.get('sale.order.line')._get_uom_id(cr, uid, args)
    _name = 'sale.order.line.master'
    _columns = {
        'order_id': fields.many2one(
            'sale.order', 'Order Reference', required=True,
            ondelete='cascade'),
        'delivery_term_id': fields.many2one(
            'sale.delivery.term', 'Delivery term',
            required=True, ondelete='restrict'),
        'name': fields.char('Description', size=256, required=True),
        'product_id': fields.many2one(
            'product.product', 'Product',
            domain=[('sale_ok', '=', True)]),
        'price_unit': fields.float(
            'Unit Price', required=True,
            digits_compute=dp.get_precision('Product Price')),
        'price_subtotal': fields.function(_amount_line, string='Subtotal',
                                          digits_compute=dp.get_precision(
                                              'Product Price')),
        'product_uom_qty': fields.float(
            'Quantity (UoM)', digits_compute=dp.get_precision('Product UoS'),
            required=True),
        'product_uom': fields.many2one(
            'product.uom', 'Unit of Measure ', required=True),
        'product_uos_qty': fields.float(
            'Quantity (UoS)', digits_compute=dp.get_precision('Product UoS')),
        'product_uos': fields.many2one('product.uom', 'Product UoS'),
        'product_packaging': fields.many2one('product.packaging', 'Packaging'),
        'order_line_ids': fields.one2many(
            'sale.order.line', 'master_line_id', 'Detailed lines'),
        'discount': fields.float('Discount (%)', digits=(16, 2)),
        'tax_ids': fields.many2many(
            'account.tax', 'sale_master_order_line_tax', 'order_line_id',
            'tax_id', 'Taxes'),
    }
    _defaults = {
        'product_uom': _get_uom_id,
        'product_uom_qty': 1,
        'product_uos_qty': 1,
        'product_packaging': False,
        'price_unit': 0.0,
    }
    def _prepare_order_line(
            self, cr, uid, term_line, master_line, group_index=0, context=None
    ):
        """Build the values dict for one detailed sale.order.line.

        The quantity is the master quantity scaled by the term line's
        percentage; the delay and picking group come from the term line
        and its index.
        """
        order_line_pool = self.pool.get('sale.order.line')
        group_pool = self.pool.get('sale.order.line.group')
        group_ids = group_pool.search(cr, uid, [])
        product_uom_qty = master_line.product_uom_qty * term_line.quantity_perc
        product_uos_qty = master_line.product_uos_qty * term_line.quantity_perc
        order_line_vals = {}
        # Run the standard on_change so computed values (price, taxes...)
        # are consistent with a manually entered line.
        on_change_res = order_line_pool.product_id_change(
            cr, uid, [], master_line.order_id.pricelist_id.id,
            master_line.product_id.id, qty=product_uom_qty,
            uom=master_line.product_uom.id, qty_uos=product_uos_qty,
            uos=master_line.product_uos.id, name=master_line.name,
            partner_id=master_line.order_id.partner_id.id,
            lang=False, update_tax=True,
            date_order=master_line.order_id.date_order,
            packaging=master_line.product_packaging.id,
            fiscal_position=master_line.order_id.fiscal_position.id,
            flag=False, context=context)
        order_line_vals.update(on_change_res['value'])
        # Explicit values override whatever the on_change computed.
        order_line_vals.update({
            'order_id': master_line.order_id.id,
            'name': master_line.name,
            'price_unit': master_line.price_unit,
            'product_uom_qty': product_uom_qty,
            'product_uom': master_line.product_uom.id,
            'product_id': (
                master_line.product_id and master_line.product_id.id or False),
            'product_uos_qty': product_uos_qty,
            'product_uos': (
                master_line.product_uos.id
                if master_line.product_uos
                else False),
            'product_packaging': master_line.product_packaging.id,
            'master_line_id': master_line.id,
            'delay': term_line.delay,
            'picking_group_id': group_ids[group_index],
            'tax_id': [(6, 0, [tax.id for tax in master_line.tax_ids])],
        })
        return order_line_vals
    def generate_detailed_lines(self, cr, uid, ids, context=None):
        """Create one detailed order line per delivery term line.

        Raises if detailed lines already exist, if there are fewer
        picking groups than term lines, or if the term's percentages do
        not sum to 100%.
        """
        group_pool = self.pool.get('sale.order.line.group')
        order_line_pool = self.pool.get('sale.order.line')
        group_ids = group_pool.search(cr, uid, [])
        for master_line in self.browse(cr, uid, ids):
            if master_line.order_line_ids:
                raise orm.except_orm(
                    _('Error'),
                    _("Detailed lines generated yet (for master line '%s'). "
                      "Remove them first") % master_line.name)
            if len(master_line.delivery_term_id.line_ids) > len(group_ids):
                raise orm.except_orm(
                    _('Error'),
                    _("Delivery term lines are %d. Order line groups are %d. "
                      "Please create more groups")
                    % (len(master_line.delivery_term_id.line_ids),
                       len(group_ids)))
            if not master_line.delivery_term_id.is_total_percentage_correct():
                raise orm.except_orm(
                    _('Error'),
                    _("Total percentage of delivery term %s is not equal to 1")
                    % master_line.delivery_term_id.name)
            for group_index, term_line in enumerate(
                    master_line.delivery_term_id.line_ids
            ):
                order_line_vals = self._prepare_order_line(
                    cr, uid, term_line, master_line, group_index=group_index,
                    context=context)
                order_line_pool.create(
                    cr, uid, order_line_vals, context=context)
        return True
    def copy_data(self, cr, uid, id, default=None, context=None):
        """On duplication, never carry over the generated detailed lines."""
        if not default:
            default = {}
        default.update({
            'order_line_ids': [],
        })
        return super(sale_order_line_master, self).copy_data(
            cr, uid, id, default, context=context)
    def check_master_line_total(self, cr, uid, ids, context=None):
        """Verify detailed line quantities add up to the master quantity.

        Bug fix: the detailed quantities are the master quantity times
        float percentages, so their sum can differ from the master by a
        rounding ulp; compare with a tolerance instead of `!=`, which
        raised spurious errors on perfectly valid splits.
        """
        for master_line in self.browse(cr, uid, ids, context):
            master_uom_qty = master_line.product_uom_qty
            master_uos_qty = master_line.product_uos_qty
            total_uom_qty = 0.0
            total_uos_qty = 0.0
            for order_line in master_line.order_line_ids:
                total_uom_qty += order_line.product_uom_qty
                total_uos_qty += order_line.product_uos_qty
            if abs(master_uom_qty - total_uom_qty) > 1e-6:
                raise orm.except_orm(
                    _('Error'),
                    _('Order lines total quantity %s is different from master '
                      'line quantity %s') % (total_uom_qty, master_uom_qty))
            if abs(master_uos_qty - total_uos_qty) > 1e-6:
                raise orm.except_orm(
                    _('Error'),
                    _('Order lines total quantity %s is different from master '
                      'line quantity %s') % (total_uos_qty, master_uos_qty))
class sale_order_line(orm.Model):
    """Extends sale.order.line with a link back to its master line."""
    _inherit = 'sale.order.line'
    _columns = {
        'master_line_id': fields.many2one(
            'sale.order.line.master', 'Master Line'),
    }
    def copy_data(self, cr, uid, id, default=None, context=None):
        """On duplication, detach the copy from any master line."""
        if not default:
            default = {}
        default.update({'master_line_id': False})
        return super(sale_order_line, self).copy_data(
            cr, uid, id, default, context=context)
class sale_order(orm.Model):
    """Extends sale.order with master order lines and delivery-term
    driven generation of the detailed order lines."""
    _inherit = 'sale.order'
    _columns = {
        # Editable only in draft; detailed lines are derived from these.
        'master_order_line': fields.one2many(
            'sale.order.line.master', 'order_id', 'Master Order Lines',
            readonly=True, states={'draft': [('readonly', False)]}),
    }
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate without the detailed order lines; they are
        regenerated from the copied master lines."""
        if not default:
            default = {}
        default.update({
            'order_line': [],
        })
        return super(sale_order, self).copy(
            cr, uid, id, default, context=context)
    def generate_detailed_lines(self, cr, uid, ids, context=None):
        """Generate detailed lines for every master line of the orders."""
        for order in self.browse(cr, uid, ids, context):
            for master_line in order.master_order_line:
                master_line.generate_detailed_lines()
        return True
    def action_wait(self, cr, uid, ids, context=None):
        """On confirmation, first verify that detailed quantities match
        each master line, then run the standard confirmation."""
        for order in self.browse(cr, uid, ids, context):
            for master_line in order.master_order_line:
                master_line.check_master_line_total()
        return super(sale_order, self).action_wait(
            cr, uid, ids, context=context)
| agpl-3.0 |
totoromano/settings | node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.
  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-line string calculated from the name and seed.
  This generates something which looks like a GUID, but depends only on the
  name and seed. This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can explicitly
  determine the GUID to refer to explicitly. It also means that the GUID will
  not change when the project for a target is rebuilt.
  """
  # Calculate a MD5 signature for the seed and name.
  # Bug fix: encode to bytes first -- hashlib rejects (unicode) str input on
  # Python 3; on Python 2 encoding an ASCII str is a no-op, so the produced
  # digest (and therefore every generated GUID) is unchanged.
  d = _new_md5((str(seed) + str(name)).encode('utf-8')).hexdigest().upper()
  # Convert most of the signature to GUID form (discard the rest)
  guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
          + '-' + d[20:32] + '}')
  return guid
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
  """Base class for entries (projects and folders) in a VS solution.

  Provides a total ordering so solution contents are emitted
  deterministically across runs.
  """
  def __cmp__(self, other):
    # Sort by name then guid (so things are in order on vs2008).
    # NOTE: __cmp__/cmp only exist on Python 2; this module targets Python 2.
    return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""
  def __init__(self, path, name = None, entries = None,
               guid = None, items = None):
    """Initializes the folder.
    Args:
      path: Full path to the folder.
      name: Name of the folder.
      entries: List of folder entries to nest inside this folder. May contain
          Folder or Project objects. May be None, if the folder is empty.
      guid: GUID to use for folder, if not None.
      items: List of solution items to include in the folder project. May be
          None, if the folder does not directly contain items.
    """
    if name:
      self.name = name
    else:
      # Use last layer.
      self.name = os.path.basename(path)
    self.path = path
    self.guid = guid
    # Copy passed lists (or set to empty lists)
    self.entries = sorted(list(entries or []))
    self.items = list(items or [])
    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
  def get_guid(self):
    """Return the folder's GUID, deriving a stable one from its path
    if none was supplied."""
    if self.guid is None:
      # Use consistent guids for folders (so things don't regenerate).
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
  """Visual Studio solution."""
  # Serializes a tree of MSVSFolder/MSVSProject entries into a .sln file.
  # The emitted byte stream (CRLF line endings, tab indentation) must match
  # the format Visual Studio expects exactly.
  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.
    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution. May contain Folder or Project
          objects. May be None, if the folder is empty.
      variants: List of build variant strings. If none, a default list will
          be used.
      websiteProperties: Flag to decide if the website properties section
          is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version
    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])
    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config. Should we be able to handle variants being a dict,
    # or add a separate variant_map variable? If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.
    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.
    self.Write()
  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.
    Raises:
      IndexError: An entry appears multiple times.
    """
    # NOTE(review): the writer default is bound once at import time; callers
    # can pass any factory taking a path and returning a file-like object.
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)
      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue
      all_entries.add(e)
      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries
    # NOTE(review): ordering relies on MSVSSolutionEntry.__cmp__, which only
    # takes effect on Python 2.
    all_entries = sorted(all_entries)
    # Open file and print header
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())
    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))
      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')
      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')
      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')
      f.write('EndProject\r\n')
    # Global section
    f.write('Global\r\n')
    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')
    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()
    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        # Per-project override of the platform for this variant, if any.
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')
    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')
    # Folder mappings
    # Omit this section if there are no folders
    if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
      f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
      for e in all_entries:
        if not isinstance(e, MSVSFolder):
          continue        # Does not apply to projects, only folders
        for subentry in e.entries:
          f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
      f.write('\tEndGlobalSection\r\n')
    f.write('EndGlobal\r\n')
    f.close()
| mit |
dednal/chromium.src | tools/metrics/histograms/update_extension_permission.py | 59 | 1028 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates ExtensionPermission2 enum in histograms.xml file with values read
from permission_message.h.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import os
import sys
from update_histogram_enum import UpdateHistogramEnum
if __name__ == '__main__':
  if len(sys.argv) > 1:
    # The script takes no arguments; print the error plus the module usage
    # text and exit non-zero.  sys.stderr.write (rather than the
    # Python-2-only "print >>" statement) keeps this consistent with the
    # next line and parseable by both Python 2 and Python 3.
    sys.stderr.write('No arguments expected!\n')
    sys.stderr.write(__doc__)
    sys.exit(1)
  # Re-extract the ExtensionPermission2 enum values in histograms.xml from
  # the permission_message.h source of truth.
  UpdateHistogramEnum(histogram_enum_name='ExtensionPermission2',
                      source_enum_path=os.path.join('..', '..', '..',
                                                    'extensions', 'common',
                                                    'permissions',
                                                    'permission_message.h'),
                      start_marker='^enum ID {',
                      end_marker='^kEnumBoundary')
| bsd-3-clause |
shakamunyi/tensorflow | tensorflow/compiler/tests/pooling_ops_test.py | 43 | 16927 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
def NHWCToNCHW(input_tensor):
  """Converts *input_tensor* from NHWC layout to NCHW.
  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.
  Returns:
    the converted tensor or a shape array
  """
  # Real tensors get a transpose op; plain sequences are simply re-indexed.
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, [0, 3, 1, 2])
  permutation = (0, 3, 1, 2)
  return [input_tensor[axis] for axis in permutation]
def NCHWToNHWC(input_tensor):
  """Converts *input_tensor* from NCHW layout to NHWC.
  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.
  Returns:
    the converted tensor or a shape array
  """
  # Real tensors get a transpose op; plain sequences are simply re-indexed.
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, [0, 2, 3, 1])
  permutation = (0, 2, 3, 1)
  return [input_tensor[axis] for axis in permutation]
def GetTestConfigs():
  """Get all the valid tests configs to run.
  Returns:
    all the valid test configs
  """
  # Every pooling test is exercised in both data layouts.
  return ["NHWC", "NCHW"]
class PoolingTest(XLATestCase):
  # Forward-pass pooling tests: each case feeds an incrementing-ramp input
  # (1, 2, 3, ...) through max/avg pooling on the XLA device and checks the
  # exact flattened outputs, in both NHWC and NCHW layouts.
  def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, expected):
    """Verifies the output values of the pooling function.
    Args:
      pool_func: Function to be called, currently only co.MaxPool.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
    """
    total_size = np.prod(input_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x = np.array([f * 1.0 for f in range(1, total_size + 1)], dtype=np.float32)
    x = x.reshape(input_sizes)
    with self.test_session() as sess:
      with self.test_scope():
        inputs = array_ops.placeholder(dtypes.float32)
        t = inputs
        if data_format == "NCHW":
          # Test parameters are specified in NHWC; translate them (and the
          # result, below) when exercising the NCHW code path.
          t = NHWCToNCHW(t)
          ksize = NHWCToNCHW(ksize)
          strides = NHWCToNCHW(strides)
        t = pool_func(t,
                      ksize=ksize,
                      strides=strides,
                      padding=padding,
                      data_format=data_format)
        if data_format == "NCHW":
          t = NCHWToNHWC(t)
      actual = sess.run(t, {inputs: x})
      self.assertAllClose(expected, actual.flatten(), rtol=1e-5, atol=1e-6)
  def _VerifyValues(self, pool_func, input_sizes, ksize, strides, padding,
                    expected):
    """Verifies the output values of the pooling function.
    Args:
      pool_func: Function to be called, co.MaxPool, co.AvgPool,
        or the Lua version.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    # Runs the same case once per data format (NHWC and NCHW).
    for data_format in GetTestConfigs():
      self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
                          data_format, expected)
  def testMaxPoolValidPadding(self):
    expected_output = [13.0, 14.0, 15.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 3, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="VALID",
                       expected=expected_output)
  def testMaxPoolSamePadding(self):
    expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
    self._VerifyValues(nn_ops.max_pool,
                       input_sizes=[1, 2, 3, 3],
                       ksize=[1, 2, 2, 1],
                       strides=[1, 2, 2, 1],
                       padding="SAME",
                       expected=expected_output)
  def testMaxPoolSamePaddingNonSquareWindow(self):
    # input is:
    # [1.0, 2.0
    #  3.0  4.0]
    #
    # Window of [x, x] should do:
    #
    #  [max(1.0, 2.0), max(2.0, padded0),
    #   max(3.0, 4.0), max(4.0, padded0)]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME",
        expected=[2.0, 2.0, 4.0, 4.0])
  def testMaxPoolValidPaddingUnevenStride(self):
    # Uneven strides: once strided only along width, once only along height.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID",
        expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID",
        expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0])
  def testMaxPoolSamePaddingFilter4(self):
    expected_output = [
        21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
        61.0, 62.0, 63.0, 64.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)
  def testMaxPoolSamePaddingFilter8(self):
    expected_output = [
        145.0, 146.0, 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 161.0, 162.0,
        163.0, 164.0, 165.0, 166.0, 167.0, 168.0, 177.0, 178.0, 179.0, 180.0,
        181.0, 182.0, 183.0, 184.0, 185.0, 186.0, 187.0, 188.0, 189.0, 190.0,
        191.0, 192.0, 273.0, 274.0, 275.0, 276.0, 277.0, 278.0, 279.0, 280.0,
        289.0, 290.0, 291.0, 292.0, 293.0, 294.0, 295.0, 296.0, 305.0, 306.0,
        307.0, 308.0, 309.0, 310.0, 311.0, 312.0, 313.0, 314.0, 315.0, 316.0,
        317.0, 318.0, 319.0, 320.0, 401.0, 402.0, 403.0, 404.0, 405.0, 406.0,
        407.0, 408.0, 417.0, 418.0, 419.0, 420.0, 421.0, 422.0, 423.0, 424.0,
        433.0, 434.0, 435.0, 436.0, 437.0, 438.0, 439.0, 440.0, 441.0, 442.0,
        443.0, 444.0, 445.0, 446.0, 447.0, 448.0, 465.0, 466.0, 467.0, 468.0,
        469.0, 470.0, 471.0, 472.0, 481.0, 482.0, 483.0, 484.0, 485.0, 486.0,
        487.0, 488.0, 497.0, 498.0, 499.0, 500.0, 501.0, 502.0, 503.0, 504.0,
        505.0, 506.0, 507.0, 508.0, 509.0, 510.0, 511.0, 512.0
    ]
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)
  # Tests for DepthwiseMaxPooling on CPU only.
  def testDepthwiseMaxPool1x1DepthWindow1(self):
    # input is:
    # [1.0, ..., 10.0] along depth,
    #
    # We maxpool by depth in patches of 2.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 1, 1, 10],
        ksize=[1, 1, 1, 2],
        strides=[1, 1, 1, 2],
        padding="SAME",
        expected=[2.0, 4.0, 6.0, 8.0, 10.0])
  def testDepthwiseMaxPool2x2DepthWindow3(self):
    # input is:
    #
    # a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
    # output.  Each node has contiguous values, so the depthwise max
    # should be multiples of 3.0.
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 2, 2, 6],
        ksize=[1, 1, 1, 3],
        strides=[1, 1, 1, 3],
        padding="SAME",
        expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])
  def testKernelSmallerThanStrideValid(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID",
        expected=[9, 12, 30, 33])
  def testKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 7, 9])
    self._VerifyValues(
        nn_ops.max_pool,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=[1, 3, 9, 11])
  # Average pooling
  def testAvgPoolValidPadding(self):
    expected_output = [7, 8, 9]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID",
        expected=expected_output)
  def testAvgPoolSamePadding(self):
    expected_output = [7., 8., 9., 11.5, 12.5, 13.5]
    self._VerifyValues(
        nn_ops.avg_pool,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME",
        expected=expected_output)
class PoolGradTest(XLATestCase):
  # Gradient tests: compute the pooling gradient with the TensorFlow CPU
  # kernel as the reference, then re-run the same gradient op on the XLA
  # device and require the two results to match.
  CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
  def _VerifyOneTest(self, pool_func, pool_grad_func, input_sizes, ksize,
                     strides, padding, data_format):
    """Verifies the output values of the pooling gradient function.
    Args:
      pool_func: Forward pooling function
      pool_grad_func: Pooling gradient function for pool_grad_func
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
    """
    total_size = np.prod(input_sizes)
    # Deterministic ramp input (1, 2, 3, ...) shaped to input_sizes.
    x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
    with self.test_session() as sess:
      # Use the forward pool function to compute some corresponding outputs
      # (needed for the CPU device, and we need the shape in both cases).
      with ops.device(self.CPU_DEVICE):
        inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        outputs = pool_func(
            inputs,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      output_vals = np.array(sess.run(outputs, {inputs: x}))
      # Incoming gradient is itself a ramp, matching the output shape.
      output_gradient_vals = np.arange(
          1, output_vals.size + 1, dtype=np.float32)
      output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
      # Use the Tensorflow CPU pooling gradient to compute the expected input
      # gradients.
      with ops.device(self.CPU_DEVICE):
        output_gradients = array_ops.placeholder(
            dtypes.float32, shape=output_vals.shape)
        expected_input_gradients = pool_grad_func(
            inputs,
            outputs,
            output_gradients,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      expected_input_gradient_vals = sess.run(
          expected_input_gradients,
          {inputs: x,
           output_gradients: output_gradient_vals})
      # Run the gradient op on the XLA device
      with self.test_scope():
        outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
        xla_inputs = inputs
        xla_outputs = outputs
        xla_output_gradients = output_gradients
        xla_ksize = ksize
        xla_strides = strides
        if data_format == "NCHW":
          # Translate placeholders and parameters for the NCHW code path.
          xla_inputs = NHWCToNCHW(inputs)
          xla_outputs = NHWCToNCHW(outputs)
          xla_output_gradients = NHWCToNCHW(output_gradients)
          xla_ksize = NHWCToNCHW(ksize)
          xla_strides = NHWCToNCHW(strides)
        actual_input_gradients = pool_grad_func(
            xla_inputs,
            xla_outputs,
            xla_output_gradients,
            ksize=xla_ksize,
            strides=xla_strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          actual_input_gradients = NCHWToNHWC(actual_input_gradients)
      actual = sess.run(actual_input_gradients, {
          inputs: x,
          outputs: output_vals,
          output_gradients: output_gradient_vals
      })
      # Compare the Tensorflow and XLA results.
      self.assertAllClose(
          expected_input_gradient_vals.flatten(),
          actual.flatten(),
          rtol=1e-4,
          atol=1e-6)
      self.assertShapeEqual(actual, inputs)
  def _VerifyValues(self, pool_func, pool_grad_func, input_sizes, ksize,
                    strides, padding):
    """Verifies the output values of the pooling function.
    Args:
      pool_func: Pooling function to be called, e.g., tf.nn.max_pool
      pool_grad_func: Corresponding pooling gradient function.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions
      strides: The stride dimensions
      padding: Padding type.
    """
    # Runs the same case once per data format (NHWC and NCHW).
    for data_format in GetTestConfigs():
      self._VerifyOneTest(pool_func, pool_grad_func, input_sizes, ksize,
                          strides, padding, data_format)
  def _TestPooling(self, forward_op, backward_op):
    # Shared battery of shape/stride/padding combinations run for both
    # max- and avg-pooling gradients.
    # VALID padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 3, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="VALID")
    # SAME padding
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 3, 3],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
    # SAME padding, non square window
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 2, 2, 1],
        ksize=[1, 1, 2, 1],
        strides=[1, 1, 1, 1],
        padding="SAME")
    # VALID padding, uneven stride
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 1, 2, 1],
        padding="VALID")
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 1, 1],
        padding="VALID")
    # SAME padding, size 4 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 4, 4, 4],
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
    # SAME padding, size 8 input
    self._VerifyValues(
        forward_op,
        backward_op,
        input_sizes=[1, 8, 8, 8],
        ksize=[1, 3, 3, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
  def testMaxPool(self):
    self._TestPooling(nn_ops.max_pool, gen_nn_ops._max_pool_grad)
  def testAvgPool(self):
    # Wrapper around AvgPoolGrad that ignores extra arguments needed by
    # MaxPoolGrad.
    def AvgPoolGrad(inputs, outputs, output_gradients, ksize, strides, padding,
                    data_format):
      del outputs  # Unused by average-pooling gradients.
      return gen_nn_ops._avg_pool_grad(
          inputs.get_shape().as_list(),
          output_gradients,
          ksize=ksize,
          strides=strides,
          padding=padding,
          data_format=data_format)
    self._TestPooling(nn_ops.avg_pool, AvgPoolGrad)
  # The CPU implementation of AvgPoolGrad doesn't accept kernels smaller than
  # the stride size, so we only run the following tests on MaxPoolGrad.
  def testMaxPoolKernelSmallerThanStrideValid(self):
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops._max_pool_grad,
        input_sizes=[1, 7, 7, 1],
        ksize=[1, 2, 2, 1],
        strides=[1, 3, 3, 1],
        padding="VALID")
  def testMaxPoolKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops._max_pool_grad,
        input_sizes=[1, 3, 3, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
    self._VerifyValues(
        nn_ops.max_pool,
        gen_nn_ops._max_pool_grad,
        input_sizes=[1, 4, 4, 1],
        ksize=[1, 1, 1, 1],
        strides=[1, 2, 2, 1],
        padding="SAME")
# Run the full test suite when executed directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
Jgarcia-IAS/SITE | addons/auth_crypt/auth_crypt.py | 108 | 3919 | import logging
from passlib.context import CryptContext
import openerp
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
# Shared passlib policy for all res.users passwords: new hashes use the first
# listed scheme; legacy md5_crypt hashes still verify but are flagged for a
# transparent re-hash on the next successful login.
default_crypt_context = CryptContext(
    # kdf which can be verified by the context. The default encryption kdf is
    # the first of the list
    ['pbkdf2_sha512', 'md5_crypt'],
    # deprecated algorithms are still verified as usual, but ``needs_update``
    # will indicate that the stored hash should be replaced by a more recent
    # algorithm. Passlib 1.6 supports an `auto` value which deprecates any
    # algorithm but the default, but Debian only provides 1.5 so...
    deprecated=['md5_crypt'],
)
class res_users(osv.osv):
    """Extends res.users to store only salted password hashes.

    The legacy plaintext ``password`` column is cleared on write; the hash
    lives in ``password_crypt`` and is verified through the passlib context.
    """
    _inherit = "res.users"
    def init(self, cr):
        # Module install/upgrade hook: migrate any remaining plaintext
        # passwords to hashes.
        _logger.info("Hashing passwords, may be slow for databases with many users...")
        cr.execute("SELECT id, password FROM res_users"
                   " WHERE password IS NOT NULL"
                   " AND password != ''")
        for uid, pwd in cr.fetchall():
            self._set_password(cr, openerp.SUPERUSER_ID, uid, pwd)
    def set_pw(self, cr, uid, id, name, value, args, context):
        # Function-field inverse: hash + store the new password, then drop
        # stale ORM cache entries.
        if value:
            self._set_password(cr, uid, id, value, context=context)
        self.invalidate_cache(cr, uid, context=context)
    def get_pw( self, cr, uid, ids, name, args, context ):
        # Function-field getter: returns the (cleared) plaintext column.
        cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
        return dict(cr.fetchall())
    _columns = {
        'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
        'password_crypt': fields.char(string='Encrypted Password', invisible=True, copy=False),
    }
    def check_credentials(self, cr, uid, password):
        # convert to base_crypt if needed
        cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
        encrypted = None
        if cr.rowcount:
            stored, encrypted = cr.fetchone()
            # Lazy migration: a plaintext password found at login time is
            # hashed on the spot.
            if stored and not encrypted:
                self._set_password(cr, uid, uid, stored)
                self.invalidate_cache(cr, uid)
        try:
            return super(res_users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            # Plaintext check failed; fall back to verifying against the
            # stored hash, upgrading deprecated hash schemes as we go.
            if encrypted:
                valid_pass, replacement = self._crypt_context(cr, uid, uid)\
                        .verify_and_update(password, encrypted)
                if replacement is not None:
                    self._set_encrypted_password(cr, uid, uid, replacement)
                if valid_pass:
                    return
            raise
    def _set_password(self, cr, uid, id, password, context=None):
        """ Encrypts then stores the provided plaintext password for the user
        ``id``
        """
        encrypted = self._crypt_context(cr, uid, id, context=context).encrypt(password)
        self._set_encrypted_password(cr, uid, id, encrypted, context=context)
    def _set_encrypted_password(self, cr, uid, id, encrypted, context=None):
        """ Store the provided encrypted password to the database, and clears
        any plaintext password
        :param uid: id of the current user
        :param id: id of the user on which the password should be set
        """
        cr.execute(
            "UPDATE res_users SET password='', password_crypt=%s WHERE id=%s",
            (encrypted, id))
    def _crypt_context(self, cr, uid, id, context=None):
        """ Passlib CryptContext instance used to encrypt and verify
        passwords. Can be overridden if technical, legal or political matters
        require different kdfs than the provided default.
        Requires a CryptContext as deprecation and upgrade notices are used
        internally
        """
        return default_crypt_context
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kosgroup/odoo | odoo/workflow/service.py | 20 | 4081 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from helpers import Session
from helpers import Record
from odoo.workflow.instance import WorkflowInstance
# import instance
class WorkflowService(object):
    """Facade over the workflow instances of one (model, record) pair."""
    # CACHE maps dbname -> {model_name: [(wkf_id,), ...]} of on-create
    # workflows, so record creation avoids one query per insert.
    CACHE = {}
    @classmethod
    def clear_cache(cls, dbname):
        """Drops the per-database cache of on-create workflow ids."""
        cls.CACHE[dbname] = {}
    @classmethod
    def new(cls, cr, uid, model_name, record_id):
        """Convenience constructor from raw cursor/uid/model/id values."""
        return cls(Session(cr, uid), Record(model_name, record_id))
    def __init__(self, session, record):
        assert isinstance(session, Session)
        assert isinstance(record, Record)
        self.session = session
        self.record = record
        self.cr = self.session.cr
    def write(self):
        """Re-evaluates every active workflow instance of this record."""
        self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s',
                        (self.record.id or None, self.record.model or None, 'active')
                        )
        for (instance_id,) in self.cr.fetchall():
            WorkflowInstance(self.session, self.record, {'id': instance_id}).update()
    def trigger(self):
        """Re-evaluates instances that registered a trigger on this record."""
        self.cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (self.record.id, self.record.model))
        res = self.cr.fetchall()
        for (instance_id,) in res:
            # The first selected column just echoes the current uid back as a
            # constant; the real payload is the instance's own (model, id).
            self.cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (self.session.uid, instance_id,))
            current_uid, current_model_name, current_record_id = self.cr.fetchone()
            current_session = Session(self.session.cr, current_uid)
            current_record = Record(current_model_name, current_record_id)
            WorkflowInstance(current_session, current_record, {'id': instance_id}).update()
    def delete(self):
        """Deletes all workflow instances attached to this record."""
        WorkflowInstance(self.session, self.record, {}).delete()
    def create(self):
        """Instantiates every on-create workflow defined for this model."""
        WorkflowService.CACHE.setdefault(self.cr.dbname, {})
        wkf_ids = WorkflowService.CACHE[self.cr.dbname].get(self.record.model, None)
        # NOTE(review): an empty result list is cached but fails this truthy
        # check, so models without workflows re-run the query every time.
        if not wkf_ids:
            self.cr.execute('select id from wkf where osv=%s and on_create=True', (self.record.model,))
            wkf_ids = self.cr.fetchall()
            WorkflowService.CACHE[self.cr.dbname][self.record.model] = wkf_ids
        for (wkf_id, ) in wkf_ids:
            WorkflowInstance.create(self.session, self.record, wkf_id)
    def validate(self, signal):
        """Sends *signal* to each active instance; True if any accepted it."""
        result = False
        # ids of all active workflow instances for a corresponding resource (id, model_nam)
        self.cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (self.record.id, self.record.model, 'active'))
        # TODO: Refactor the workflow instance object
        for (instance_id,) in self.cr.fetchall():
            wi = WorkflowInstance(self.session, self.record, {'id': instance_id})
            res2 = wi.validate(signal)
            result = result or res2
        return result
    def redirect(self, new_rid):
        """Re-points waiting subflow workitems from this record to *new_rid*."""
        # get ids of wkf instances for the old resource (res_id)
        # CHECKME: shouldn't we get only active instances?
        self.cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
        for old_inst_id, workflow_id in self.cr.fetchall():
            # first active instance for new resource (new_rid), using same wkf
            self.cr.execute(
                'SELECT id '\
                'FROM wkf_instance '\
                'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
                (new_rid, self.record.model, workflow_id, 'active'))
            new_id = self.cr.fetchone()
            if new_id:
                # select all workitems which "wait" for the old instance
                self.cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
                for (item_id,) in self.cr.fetchall():
                    # redirect all those workitems to the wkf instance of the new resource
                    self.cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
| gpl-3.0 |
chrisdroid/nexmon | buildtools/gcc-arm-none-eabi-5_4-2016q2-osx/arm-none-eabi/lib/armv7-ar/thumb/fpu/libstdc++.a-gdb.py | 6 | 2620 | # -*- python -*-
# Copyright (C) 2009-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Build-time install locations baked in when this auto-load stub was
# generated; used below to recompute the pretty-printer path relative to
# wherever the toolchain was actually unpacked.
pythondir = '/Users/build/workspace/GCC-5-0-build_build-toolchain-mac/gcc-arm-none-eabi-5_4-2016q2-20160622/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/workspace/GCC-5-0-build_build-toolchain-mac/gcc-arm-none-eabi-5_4-2016q2-20160622/install-native/arm-none-eabi/lib/armv7-ar/thumb/fpu'
# This file might be loaded when there is no current objfile.  This
# can happen if the user loads it manually.  In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE(review): assumes a non-empty common prefix (both baked-in paths
    # share a root), otherwise prefix[-1] would raise IndexError.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| gpl-3.0 |
meganbkratz/acq4 | acq4/analysis/atlas/AuditoryCortex/CtrlTemplate.py | 4 | 4733 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './acq4/analysis/atlas/AuditoryCortex/CtrlTemplate.ui'
#
# Created: Tue Dec 24 01:49:12 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# PyQt4 API-version compatibility shims (generated boilerplate): under API
# v2 / Python 3, QString.fromUtf8 and QApplication.UnicodeUTF8 do not exist,
# so fall back to the identity function and to the translate() overload that
# takes no encoding argument.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI builder for the atlas control form.

    Do not edit this class by hand — regenerate it from CtrlTemplate.ui
    instead (see the warning in the file header).
    """

    def setupUi(self, Form):
        """Instantiate the widgets, lay them out on *Form* and name them."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(280, 147)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Row 0: slice-plane selector (items populated in retranslateUi).
        self.label = QtGui.QLabel(Form)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.slicePlaneCombo = QtGui.QComboBox(Form)
        self.slicePlaneCombo.setObjectName(_fromUtf8("slicePlaneCombo"))
        self.slicePlaneCombo.addItem(_fromUtf8(""))
        self.slicePlaneCombo.addItem(_fromUtf8(""))
        self.slicePlaneCombo.addItem(_fromUtf8(""))
        self.slicePlaneCombo.addItem(_fromUtf8(""))
        self.gridLayout.addWidget(self.slicePlaneCombo, 0, 1, 1, 1)
        # Row 1: hemisphere selector.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setSpacing(0)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout_3.addWidget(self.label_2)
        self.hemisphereCombo = QtGui.QComboBox(Form)
        self.hemisphereCombo.setObjectName(_fromUtf8("hemisphereCombo"))
        self.hemisphereCombo.addItem(_fromUtf8(""))
        self.hemisphereCombo.addItem(_fromUtf8(""))
        self.horizontalLayout_3.addWidget(self.hemisphereCombo)
        self.gridLayout.addLayout(self.horizontalLayout_3, 1, 0, 1, 2)
        # Row 2: display option checkboxes (photo/drawing/flip).
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.photoCheck = QtGui.QCheckBox(Form)
        self.photoCheck.setObjectName(_fromUtf8("photoCheck"))
        self.horizontalLayout.addWidget(self.photoCheck)
        self.drawingCheck = QtGui.QCheckBox(Form)
        self.drawingCheck.setChecked(True)
        self.drawingCheck.setObjectName(_fromUtf8("drawingCheck"))
        self.horizontalLayout.addWidget(self.drawingCheck)
        self.flipCheck = QtGui.QCheckBox(Form)
        self.flipCheck.setObjectName(_fromUtf8("flipCheck"))
        self.horizontalLayout.addWidget(self.flipCheck)
        self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 2)
        # Row 3: slice thickness spin box (pyqtgraph SpinBox promoted widget).
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(0)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label_3 = QtGui.QLabel(Form)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_2.addWidget(self.label_3)
        self.thicknessSpin = SpinBox(Form)
        self.thicknessSpin.setObjectName(_fromUtf8("thicknessSpin"))
        self.horizontalLayout_2.addWidget(self.thicknessSpin)
        self.gridLayout.addLayout(self.horizontalLayout_2, 3, 0, 1, 2)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign all translatable label/item text for the form."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.label.setText(_translate("Form", "Slice Plane", None))
        self.slicePlaneCombo.setItemText(0, _translate("Form", "Parasaggital", None))
        self.slicePlaneCombo.setItemText(1, _translate("Form", "Coronal (AVCN-DCN)", None))
        self.slicePlaneCombo.setItemText(2, _translate("Form", "Coronal (PVCN-DCN)", None))
        self.slicePlaneCombo.setItemText(3, _translate("Form", "Horizontal (VCN)", None))
        self.label_2.setText(_translate("Form", "Hemisphere", None))
        self.hemisphereCombo.setItemText(0, _translate("Form", "Left", None))
        self.hemisphereCombo.setItemText(1, _translate("Form", "Right", None))
        self.photoCheck.setText(_translate("Form", "Photo", None))
        self.drawingCheck.setText(_translate("Form", "Drawing", None))
        self.flipCheck.setText(_translate("Form", "Flip", None))
        self.label_3.setText(_translate("Form", "Thickness", None))
from acq4.pyqtgraph import SpinBox
| mit |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/python/kernel_tests/variable_ops_test.py | 29 | 9619 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# numpy dtype -> TensorFlow dtype mapping used by the parametrized tests below.
_NP_TO_TF = {
    np.float32: tf.float32,
    np.float64: tf.float64,
    np.int32: tf.int32,
    np.int64: tf.int64,
}
class VariableOpTest(tf.test.TestCase):
  """Tests for the raw Variable/Assign state ops.

  Exercises shape inference (``set_shape``), assign/assign_add/assign_sub,
  temporary variables, cross-device dependencies and initialization checks.
  """

  def _initFetch(self, x, tftype, use_gpu=None):
    """Create a variable of x's shape, assign x into it, and fetch it back."""
    with self.test_session(use_gpu=use_gpu):
      p = state_ops.variable_op(x.shape, tftype)
      op = tf.assign(p, x)
      op.op.run()
      return p.eval()

  def _testTypes(self, vals):
    # Round-trip `vals` through a variable for every supported dtype,
    # on both CPU and GPU.
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
      self.setUp()
      x = vals.astype(dtype)
      tftype = _NP_TO_TF[dtype]
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether the
      # Variable op has an implementation for that type on GPU as we expect
      # that Variable and Assign have GPU implementations for matching tf.
      self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))

  def testBasic(self):
    self._testTypes(np.arange(0, 20).reshape([4, 5]))

  def testset_shape(self):
    # set_shape=True (default) pins the static shape; False leaves it unknown.
    p = state_ops.variable_op([1, 2], tf.float32)
    self.assertEqual([1, 2], p.get_shape())
    p = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())

  def testAssign(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, tf.float32)
    self.assertShapeEqual(value, var)
    assigned = tf.assign(var, value)
    self.assertShapeEqual(value, assigned)

  def testAssignNoValidateShape(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, tf.float32)
    self.assertShapeEqual(value, var)
    assigned = tf.assign(var, value, validate_shape=False)
    self.assertShapeEqual(value, assigned)

  def testAssignNoVarShape(self):
    # Assigning a known-shape value to an unknown-shape var infers the shape.
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    assigned = tf.assign(var, value)
    self.assertShapeEqual(value, assigned)

  def testAssignNoVarShapeNoValidateShape(self):
    value = np.array([[42.0, 43.0]])
    var = state_ops.variable_op(value.shape, tf.float32, set_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
    assigned = tf.assign(var, value, validate_shape=False)
    self.assertShapeEqual(value, assigned)

  def _NewShapelessTensor(self):
    """Return a placeholder with a fully unknown static shape."""
    tensor = tf.placeholder(tf.float32)
    self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
    return tensor

  def testAssignNoValueShape(self):
    # With validate_shape (default), the assign output keeps the var's shape.
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, tf.float32)
    assigned = tf.assign(var, value)
    self.assertEqual(shape, var.get_shape())
    self.assertEqual(shape, assigned.get_shape())

  def testAssignNoValueShapeNoValidateShape(self):
    # Without validation, the assign output's shape becomes unknown.
    value = self._NewShapelessTensor()
    shape = [1, 2]
    var = state_ops.variable_op(shape, tf.float32)
    self.assertEqual(shape, var.get_shape())
    assigned = tf.assign(var, value, validate_shape=False)
    self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())

  def testAssignNoShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       tf.assign(var, value).get_shape())

  def testAssignNoShapeNoValidateShape(self):
    with self.test_session():
      value = self._NewShapelessTensor()
      var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
      self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
      self.assertEqual(tensor_shape.unknown_shape(),
                       tf.assign(var, value, validate_shape=False).get_shape())

  def testAssignUpdate(self):
    var = state_ops.variable_op([1, 2], tf.float32)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())

  def testAssignUpdateNoVarShape(self):
    # Shape is inferred from the update value when the var's is unknown.
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())

  def testAssignUpdateNoValueShape(self):
    var = state_ops.variable_op([1, 2], tf.float32)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], subbed.get_shape())

  def testAssignUpdateNoShape(self):
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())

  def testTemporaryVariable(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="foo")
      var = tf.assign(var, [[4.0, 5.0]])
      var = tf.assign_add(var, [[6.0, 7.0]])
      final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
      self.assertAllClose([[10.0, 12.0]], final.eval())

  def testDestroyNonexistentTemporaryVariable(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], tf.float32)
      final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
      with self.assertRaises(errors.NotFoundError):
        final.eval()

  def testDuplicateTemporaryVariable(self):
    # Two temporary variables sharing var_name must raise AlreadyExists.
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="dup")
      var1 = tf.assign(var1, [[1.0, 2.0]])
      var2 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="dup")
      var2 = tf.assign(var2, [[3.0, 4.0]])
      final = var1 + var2
      with self.assertRaises(errors.AlreadyExistsError):
        final.eval()

  def testDestroyTemporaryVariableTwice(self):
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable([1, 2], tf.float32)
      val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
      final = val1 + val2
      with self.assertRaises(errors.NotFoundError):
        final.eval()

  def testTemporaryVariableNoLeak(self):
    # A temporary variable that is never destroyed must not leak the session.
    with self.test_session(use_gpu=True):
      var = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="bar")
      final = tf.identity(var)
      final.eval()

  def testTwoTemporaryVariablesNoLeaks(self):
    with self.test_session(use_gpu=True):
      var1 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="var1")
      var2 = gen_state_ops._temporary_variable(
          [1, 2],
          tf.float32,
          var_name="var2")
      final = var1 + var2
      final.eval()

  def testAssignDependencyAcrossDevices(self):
    with self.test_session(use_gpu=True):
      # The variable and an op to increment it are on the GPU.
      var = state_ops.variable_op([1], tf.float32)
      tf.assign(var, [1.0]).eval()
      increment = tf.assign_add(var, [1.0])
      with tf.control_dependencies([increment]):
        with tf.device("/cpu:0"):
          # This mul op is pinned to the CPU, but reads the variable from the
          # GPU. The test ensures that the dependency on 'increment' is still
          # honored, i.e., the Send and Recv from GPU to CPU should take place
          # only after the increment.
          result = tf.mul(var, var)
          self.assertAllClose([4.0], result.eval())

  def testIsVariableInitialized(self):
    for use_gpu in [True, False]:
      with self.test_session(use_gpu=use_gpu):
        v0 = state_ops.variable_op([1, 2], tf.float32)
        self.assertEqual(False, tf.is_variable_initialized(v0).eval())
        tf.assign(v0, [[2.0, 3.0]]).eval()
        self.assertEqual(True, tf.is_variable_initialized(v0).eval())
if __name__ == "__main__":
  # Run all tests in this module under the TensorFlow test runner.
  tf.test.main()
| mit |
quokkaproject/flask-security | flask_security/core.py | 10 | 15768 | # -*- coding: utf-8 -*-
"""
flask_security.core
~~~~~~~~~~~~~~~~~~~
Flask-Security core module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import current_app, render_template
from flask_login import AnonymousUserMixin, UserMixin as BaseUserMixin, \
LoginManager, current_user
from flask_principal import Principal, RoleNeed, UserNeed, Identity, \
identity_loaded
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from werkzeug.datastructures import ImmutableList
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from .utils import config_value as cv, get_config, md5, url_for_security, string_types
from .views import create_blueprint
from .forms import LoginForm, ConfirmRegisterForm, RegisterForm, \
ForgotPasswordForm, ChangePasswordForm, ResetPasswordForm, \
SendConfirmationForm, PasswordlessLoginForm
# Convenient references
#: Proxy to the active application's ``_SecurityState``
#: (installed by ``Security.init_app`` under ``app.extensions['security']``).
_security = LocalProxy(lambda: current_app.extensions['security'])
#: Default Flask-Security configuration. Each key is applied to the app's
#: config with a ``SECURITY_`` prefix unless the user has already set it.
_default_config = {
    # Blueprint / routing
    'BLUEPRINT_NAME': 'security',
    'URL_PREFIX': None,
    'SUBDOMAIN': None,
    'FLASH_MESSAGES': True,
    # Password hashing (scheme must be listed in PASSWORD_SCHEMES below)
    'PASSWORD_HASH': 'plaintext',
    'PASSWORD_SALT': None,
    # Endpoint URLs
    'LOGIN_URL': '/login',
    'LOGOUT_URL': '/logout',
    'REGISTER_URL': '/register',
    'RESET_URL': '/reset',
    'CHANGE_URL': '/change',
    'CONFIRM_URL': '/confirm',
    # Post-action redirect views (None falls back to defaults in the views)
    'POST_LOGIN_VIEW': '/',
    'POST_LOGOUT_VIEW': '/',
    'CONFIRM_ERROR_VIEW': None,
    'POST_REGISTER_VIEW': None,
    'POST_CONFIRM_VIEW': None,
    'POST_RESET_VIEW': None,
    'POST_CHANGE_VIEW': None,
    'UNAUTHORIZED_VIEW': None,
    # Template paths
    'FORGOT_PASSWORD_TEMPLATE': 'security/forgot_password.html',
    'LOGIN_USER_TEMPLATE': 'security/login_user.html',
    'REGISTER_USER_TEMPLATE': 'security/register_user.html',
    'RESET_PASSWORD_TEMPLATE': 'security/reset_password.html',
    'CHANGE_PASSWORD_TEMPLATE': 'security/change_password.html',
    'SEND_CONFIRMATION_TEMPLATE': 'security/send_confirmation.html',
    'SEND_LOGIN_TEMPLATE': 'security/send_login.html',
    # Feature flags
    'CONFIRMABLE': False,
    'REGISTERABLE': False,
    'RECOVERABLE': False,
    'TRACKABLE': False,
    'PASSWORDLESS': False,
    'CHANGEABLE': False,
    # Email behaviour
    'SEND_REGISTER_EMAIL': True,
    'SEND_PASSWORD_CHANGE_EMAIL': True,
    'SEND_PASSWORD_RESET_NOTICE_EMAIL': True,
    # Token lifetimes ("<n> <unit>" strings)
    'LOGIN_WITHIN': '1 days',
    'CONFIRM_EMAIL_WITHIN': '5 days',
    'RESET_PASSWORD_WITHIN': '5 days',
    'LOGIN_WITHOUT_CONFIRMATION': False,
    'EMAIL_SENDER': 'no-reply@localhost',
    'TOKEN_AUTHENTICATION_KEY': 'auth_token',
    'TOKEN_AUTHENTICATION_HEADER': 'Authentication-Token',
    'TOKEN_MAX_AGE': None,
    # Salts for the itsdangerous serializers
    'CONFIRM_SALT': 'confirm-salt',
    'RESET_SALT': 'reset-salt',
    'LOGIN_SALT': 'login-salt',
    'CHANGE_SALT': 'change-salt',
    'REMEMBER_SALT': 'remember-salt',
    'DEFAULT_REMEMBER_ME': False,
    'DEFAULT_HTTP_AUTH_REALM': 'Login Required',
    # Email subjects
    'EMAIL_SUBJECT_REGISTER': 'Welcome',
    'EMAIL_SUBJECT_CONFIRM': 'Please confirm your email',
    'EMAIL_SUBJECT_PASSWORDLESS': 'Login instructions',
    'EMAIL_SUBJECT_PASSWORD_NOTICE': 'Your password has been reset',
    'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE': 'Your password has been changed',
    'EMAIL_SUBJECT_PASSWORD_RESET': 'Password reset instructions',
    'USER_IDENTITY_ATTRIBUTES': ['email'],
    # passlib schemes accepted when *verifying* passwords; new hashes use
    # PASSWORD_HASH above.
    'PASSWORD_SCHEMES': [
        'bcrypt',
        'des_crypt',
        'pbkdf2_sha256',
        'pbkdf2_sha512',
        'sha256_crypt',
        'sha512_crypt',
        # And always last one...
        'plaintext'
    ],
    'DEPRECATED_PASSWORD_SCHEMES': ['auto']
}
#: Default Flask-Security messages
_default_messages = {
'UNAUTHORIZED': (
'You do not have permission to view this resource.', 'error'),
'CONFIRM_REGISTRATION': (
'Thank you. Confirmation instructions have been sent to %(email)s.', 'success'),
'EMAIL_CONFIRMED': (
'Thank you. Your email has been confirmed.', 'success'),
'ALREADY_CONFIRMED': (
'Your email has already been confirmed.', 'info'),
'INVALID_CONFIRMATION_TOKEN': (
'Invalid confirmation token.', 'error'),
'EMAIL_ALREADY_ASSOCIATED': (
'%(email)s is already associated with an account.', 'error'),
'PASSWORD_MISMATCH': (
'Password does not match', 'error'),
'RETYPE_PASSWORD_MISMATCH': (
'Passwords do not match', 'error'),
'INVALID_REDIRECT': (
'Redirections outside the domain are forbidden', 'error'),
'PASSWORD_RESET_REQUEST': (
'Instructions to reset your password have been sent to %(email)s.', 'info'),
'PASSWORD_RESET_EXPIRED': (
'You did not reset your password within %(within)s. New instructions have been sent '
'to %(email)s.', 'error'),
'INVALID_RESET_PASSWORD_TOKEN': (
'Invalid reset password token.', 'error'),
'CONFIRMATION_REQUIRED': (
'Email requires confirmation.', 'error'),
'CONFIRMATION_REQUEST': (
'Confirmation instructions have been sent to %(email)s.', 'info'),
'CONFIRMATION_EXPIRED': (
'You did not confirm your email within %(within)s. New instructions to confirm your email '
'have been sent to %(email)s.', 'error'),
'LOGIN_EXPIRED': (
'You did not login within %(within)s. New instructions to login have been sent to '
'%(email)s.', 'error'),
'LOGIN_EMAIL_SENT': (
'Instructions to login have been sent to %(email)s.', 'success'),
'INVALID_LOGIN_TOKEN': (
'Invalid login token.', 'error'),
'DISABLED_ACCOUNT': (
'Account is disabled.', 'error'),
'EMAIL_NOT_PROVIDED': (
'Email not provided', 'error'),
'INVALID_EMAIL_ADDRESS': (
'Invalid email address', 'error'),
'PASSWORD_NOT_PROVIDED': (
'Password not provided', 'error'),
'PASSWORD_NOT_SET': (
'No password is set for this user', 'error'),
'PASSWORD_INVALID_LENGTH': (
'Password must be at least 6 characters', 'error'),
'USER_DOES_NOT_EXIST': (
'Specified user does not exist', 'error'),
'INVALID_PASSWORD': (
'Invalid password', 'error'),
'PASSWORDLESS_LOGIN_SUCCESSFUL': (
'You have successfuly logged in.', 'success'),
'PASSWORD_RESET': (
'You successfully reset your password and you have been logged in automatically.',
'success'),
'PASSWORD_IS_THE_SAME': (
'Your new password must be different than your previous password.', 'error'),
'PASSWORD_CHANGE': (
'You successfully changed your password.', 'success'),
'LOGIN': (
'Please log in to access this page.', 'info'),
'REFRESH': (
'Please reauthenticate to access this page.', 'info'),
}
#: Default form classes, each overridable through the matching keyword
#: argument of ``Security()`` / ``Security.init_app``.
_default_forms = {
    'login_form': LoginForm,
    'confirm_register_form': ConfirmRegisterForm,
    'register_form': RegisterForm,
    'forgot_password_form': ForgotPasswordForm,
    'reset_password_form': ResetPasswordForm,
    'change_password_form': ChangePasswordForm,
    'send_confirmation_form': SendConfirmationForm,
    'passwordless_login_form': PasswordlessLoginForm,
}
def _user_loader(user_id):
    """Flask-Login user callback: fetch the user with this id, or None."""
    return _security.datastore.find_user(id=user_id)
def _token_loader(token):
    """Flask-Login token callback for remember-me cookies.

    Deserializes the token into ``[user_id, password_md5]`` and returns the
    matching user; any invalid, expired, or stale token yields the anonymous
    user instead.
    """
    try:
        data = _security.remember_token_serializer.loads(token, max_age=_security.token_max_age)
        user = _security.datastore.find_user(id=data[0])
        # Comparing against the md5 of the stored password invalidates the
        # token automatically whenever the user's password changes.
        if user and safe_str_cmp(md5(user.password), data[1]):
            return user
    except Exception:
        # Malformed/expired tokens (BadSignature & friends) and lookup errors
        # all fall through to the anonymous user. The original bare
        # ``except:`` also swallowed SystemExit/KeyboardInterrupt.
        pass
    return _security.login_manager.anonymous_user()
def _identity_loader():
    """Flask-Principal identity loader.

    Returns an ``Identity`` for the logged-in user, or None (implicitly)
    when the current user is anonymous.
    """
    if not isinstance(current_user._get_current_object(), AnonymousUserMixin):
        identity = Identity(current_user.id)
        return identity
def _on_identity_loaded(sender, identity):
    """Populate a freshly loaded Principal identity with the current user's
    needs: the user id plus one ``RoleNeed`` per role."""
    # ``hasattr`` guards against anonymous users, which carry no ``id``.
    if hasattr(current_user, 'id'):
        identity.provides.add(UserNeed(current_user.id))
    for role in current_user.roles:
        identity.provides.add(RoleNeed(role.name))
    identity.user = current_user
def _get_login_manager(app, anonymous_user):
    """Create and initialize the Flask-Login ``LoginManager`` for *app*.

    :param app: the application being configured
    :param anonymous_user: optional anonymous-user class; falls back to
        :class:`AnonymousUser` when falsy
    """
    lm = LoginManager()
    lm.anonymous_user = anonymous_user or AnonymousUser
    lm.login_view = '%s.login' % cv('BLUEPRINT_NAME', app=app)
    lm.user_loader(_user_loader)
    lm.token_loader(_token_loader)
    # Only set flash messages when flashing is enabled for this app.
    if cv('FLASH_MESSAGES', app=app):
        lm.login_message, lm.login_message_category = cv('MSG_LOGIN', app=app)
        lm.needs_refresh_message, lm.needs_refresh_message_category = cv('MSG_REFRESH', app=app)
    else:
        lm.login_message = None
        lm.needs_refresh_message = None
    lm.init_app(app)
    return lm
def _get_principal(app):
    """Create the Flask-Principal extension for *app*.

    Session-based identity storage is disabled; identities are loaded from
    Flask-Login via ``_identity_loader`` instead.
    """
    p = Principal(app, use_sessions=False)
    p.identity_loader(_identity_loader)
    return p
def _get_pwd_context(app):
    """Build the passlib ``CryptContext`` from the app's security config.

    ``PASSWORD_HASH`` selects the scheme used for new hashes and must be one
    of ``PASSWORD_SCHEMES``; otherwise a ``ValueError`` is raised.
    """
    scheme = cv('PASSWORD_HASH', app=app)
    known_schemes = cv('PASSWORD_SCHEMES', app=app)
    retired_schemes = cv('DEPRECATED_PASSWORD_SCHEMES', app=app)
    if scheme not in known_schemes:
        allowed = ', '.join(known_schemes[:-1]) + ' and ' + known_schemes[-1]
        raise ValueError("Invalid hash scheme %r. Allowed values are %s" % (scheme, allowed))
    return CryptContext(schemes=known_schemes, default=scheme,
                        deprecated=retired_schemes)
def _get_serializer(app, name):
    """Build a ``URLSafeTimedSerializer`` for one token family.

    :param name: token family, one of 'remember', 'login', 'reset',
        'confirm' — resolved to the ``SECURITY_<NAME>_SALT`` config key.
    """
    secret_key = app.config.get('SECRET_KEY')
    salt = app.config.get('SECURITY_%s_SALT' % name.upper())
    return URLSafeTimedSerializer(secret_key=secret_key, salt=salt)
def _get_state(app, datastore, anonymous_user=None, **kwargs):
    """Assemble the :class:`_SecurityState` for *app*.

    Merges the app's SECURITY_* configuration (keys lower-cased) with the
    constructed login manager, principal, password context and serializers.
    Any form class the caller did not supply falls back to its default in
    ``_default_forms``.
    """
    for key, value in get_config(app).items():
        kwargs[key.lower()] = value
    kwargs.update(dict(
        app=app,
        datastore=datastore,
        login_manager=_get_login_manager(app, anonymous_user),
        principal=_get_principal(app),
        pwd_context=_get_pwd_context(app),
        remember_token_serializer=_get_serializer(app, 'remember'),
        login_serializer=_get_serializer(app, 'login'),
        reset_serializer=_get_serializer(app, 'reset'),
        confirm_serializer=_get_serializer(app, 'confirm'),
        _context_processors={},
        _send_mail_task=None,
        _unauthorized_callback=None
    ))
    # Fill in default form classes for anything missing or falsy.
    for key, value in _default_forms.items():
        if key not in kwargs or not kwargs[key]:
            kwargs[key] = value
    return _SecurityState(**kwargs)
def _context_processor():
    """Template context processor exposing ``security`` and
    ``url_for_security`` to every rendered template."""
    return dict(url_for_security=url_for_security, security=_security)
class RoleMixin(object):
    """Mixin for `Role` model definitions.

    Equality is based on the role name, so an instance compares equal both
    to another role with the same name and to the plain name string itself.
    """

    def __eq__(self, other):
        if self.name == other:
            return True
        return self.name == getattr(other, 'name', None)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash by name so a role and its name string land in the same bucket.
        return hash(self.name)
class UserMixin(BaseUserMixin):
    """Mixin for `User` model definitions."""

    def is_active(self):
        """Returns `True` if the user is active."""
        return self.active

    def get_auth_token(self):
        """Returns the user's authentication token.

        The token embeds the user id and an md5 of the password hash so it
        is invalidated automatically when the password changes.
        """
        data = [str(self.id), md5(self.password)]
        return _security.remember_token_serializer.dumps(data)

    def has_role(self, role):
        """Returns `True` if the user identifies with the specified role.

        :param role: A role name or `Role` instance
        """
        if isinstance(role, string_types):
            # Fixed: the original generator expression reused the name
            # ``role`` for its loop variable, shadowing the parameter.
            return role in (r.name for r in self.roles)
        else:
            return role in self.roles
class AnonymousUser(AnonymousUserMixin):
    """AnonymousUser definition.

    Carries an empty, immutable role list so role checks are always safe to
    call on a logged-out user.
    """
    def __init__(self):
        self.roles = ImmutableList()

    def has_role(self, *args):
        """Returns `False`"""
        return False
class _SecurityState(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key.lower(), value)
def _add_ctx_processor(self, endpoint, fn):
group = self._context_processors.setdefault(endpoint, [])
fn not in group and group.append(fn)
def _run_ctx_processor(self, endpoint):
rv = {}
for g in [None, endpoint]:
for fn in self._context_processors.setdefault(g, []):
rv.update(fn())
return rv
def context_processor(self, fn):
self._add_ctx_processor(None, fn)
def forgot_password_context_processor(self, fn):
self._add_ctx_processor('forgot_password', fn)
def login_context_processor(self, fn):
self._add_ctx_processor('login', fn)
def register_context_processor(self, fn):
self._add_ctx_processor('register', fn)
def reset_password_context_processor(self, fn):
self._add_ctx_processor('reset_password', fn)
def change_password_context_processor(self, fn):
self._add_ctx_processor('change_password', fn)
def send_confirmation_context_processor(self, fn):
self._add_ctx_processor('send_confirmation', fn)
def send_login_context_processor(self, fn):
self._add_ctx_processor('send_login', fn)
def mail_context_processor(self, fn):
self._add_ctx_processor('mail', fn)
def send_mail_task(self, fn):
self._send_mail_task = fn
def unauthorized_handler(self, fn):
self._unauthorized_callback = fn
class Security(object):
    """The :class:`Security` class initializes the Flask-Security extension.

    :param app: The application.
    :param datastore: An instance of a user datastore.
    """
    def __init__(self, app=None, datastore=None, **kwargs):
        self.app = app
        self.datastore = datastore
        # Supports both immediate and deferred (app-factory) initialization;
        # state is only built when both the app and datastore are present.
        if app is not None and datastore is not None:
            self._state = self.init_app(app, datastore, **kwargs)

    def init_app(self, app, datastore=None, register_blueprint=True,
                 login_form=None, confirm_register_form=None,
                 register_form=None, forgot_password_form=None,
                 reset_password_form=None, change_password_form=None,
                 send_confirmation_form=None, passwordless_login_form=None,
                 anonymous_user=None):
        """Initializes the Flask-Security extension for the specified
        application and datastore implementation.

        :param app: The application.
        :param datastore: An instance of a user datastore.
        :param register_blueprint: to register the Security blueprint or not.
        """
        datastore = datastore or self.datastore
        # Install configuration and message defaults without clobbering any
        # values the user has already set on the app.
        for key, value in _default_config.items():
            app.config.setdefault('SECURITY_' + key, value)
        for key, value in _default_messages.items():
            app.config.setdefault('SECURITY_MSG_' + key, value)
        identity_loaded.connect_via(app)(_on_identity_loaded)
        state = _get_state(app, datastore,
                           login_form=login_form,
                           confirm_register_form=confirm_register_form,
                           register_form=register_form,
                           forgot_password_form=forgot_password_form,
                           reset_password_form=reset_password_form,
                           change_password_form=change_password_form,
                           send_confirmation_form=send_confirmation_form,
                           passwordless_login_form=passwordless_login_form,
                           anonymous_user=anonymous_user)
        if register_blueprint:
            app.register_blueprint(create_blueprint(state, __name__))
            app.context_processor(_context_processor)
        state.render_template = self.render_template
        app.extensions['security'] = state
        return state

    def render_template(self, *args, **kwargs):
        """Render a template; override to customize template rendering."""
        return render_template(*args, **kwargs)

    def __getattr__(self, name):
        # Delegate unknown attributes to the state. NOTE(review): the
        # three-argument getattr silently returns None for missing
        # attributes rather than raising AttributeError.
        return getattr(self._state, name, None)
| mit |
aristeu/linux-2.6 | scripts/clang-tools/gen_compile_commands.py | 45 | 7980 | #!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) Google LLC, 2018
#
# Author: Tom Roeder <tmroeder@google.com>
#
"""A tool for generating compile_commands.json in the Linux kernel."""
import argparse
import json
import logging
import os
import re
import subprocess
import sys
# Default output database name and default log filtering.
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'

# .cmd files are hidden files named like ".<object>.cmd".
_FILENAME_PATTERN = r'^\..*\.cmd$'
# Matches "cmd_<target>.o := <compiler invocation> <source>.c"; group 1 is the
# command prefix, group 2 the C source file.
_LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def parse_arguments():
    """Sets up and parses command-line arguments.

    Returns:
        log_level: A logging level to filter log output.
        directory: The work directory where the objects were built.
        output: Where to write the compile-commands JSON file.
        ar: Command used for parsing .a archives.
        paths: The list of files/directories to handle to find .cmd files.

    (Tuple order above matches the actual return statement; the original
    docstring listed ``ar`` before ``output``.)
    """
    usage = 'Creates a compile_commands.json database from kernel .cmd files'
    parser = argparse.ArgumentParser(description=usage)
    directory_help = ('specify the output directory used for the kernel build '
                      '(defaults to the working directory)')
    parser.add_argument('-d', '--directory', type=str, default='.',
                        help=directory_help)
    output_help = ('path to the output command database (defaults to ' +
                   _DEFAULT_OUTPUT + ')')
    parser.add_argument('-o', '--output', type=str, default=_DEFAULT_OUTPUT,
                        help=output_help)
    log_level_help = ('the level of log messages to produce (defaults to ' +
                      _DEFAULT_LOG_LEVEL + ')')
    parser.add_argument('--log_level', choices=_VALID_LOG_LEVELS,
                        default=_DEFAULT_LOG_LEVEL, help=log_level_help)
    ar_help = 'command used for parsing .a archives'
    parser.add_argument('-a', '--ar', type=str, default='llvm-ar', help=ar_help)
    paths_help = ('directories to search or files to parse '
                  '(files should be *.o, *.a, or modules.order). '
                  'If nothing is specified, the current directory is searched')
    parser.add_argument('paths', type=str, nargs='*', help=paths_help)
    args = parser.parse_args()
    return (args.log_level,
            os.path.abspath(args.directory),
            args.output,
            args.ar,
            args.paths if len(args.paths) > 0 else [args.directory])
def cmdfiles_in_dir(directory):
    """Generate the iterator of .cmd files found under the directory.

    Walk under the given directory, and yield every .cmd file found.

    Args:
        directory: The directory to search for .cmd files.

    Yields:
        The path to a .cmd file.
    """
    is_cmdfile = re.compile(_FILENAME_PATTERN).match
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            if is_cmdfile(name):
                yield os.path.join(dirpath, name)
def to_cmdfile(path):
    """Return the path of the .cmd file used for the given build artifact.

    Args:
        path: file path

    Returns:
        The path to the .cmd file
    """
    # Renamed the local from ``dir`` to avoid shadowing the builtin.
    dirname, basename = os.path.split(path)
    return os.path.join(dirname, '.' + basename + '.cmd')
def cmdfiles_for_o(obj):
    """Generate the iterator of .cmd files associated with the object

    Yield the .cmd file used to build the given object

    Args:
        obj: The object path

    Yields:
        The path to .cmd file
    """
    # Kept as a generator (single yield) so all cmdfiles_for_* helpers share
    # the same iterator-returning interface.
    yield to_cmdfile(obj)
def cmdfiles_for_a(archive, ar):
    """Generate the iterator of .cmd files associated with the archive.

    Parse the given archive, and yield every .cmd file used to build it.

    Args:
        archive: The archive to parse
        ar: Command used for parsing the .a archive (e.g. ``llvm-ar``)

    Yields:
        The path to every .cmd file found
    """
    # ``ar -t`` lists one member object per line; map each to its .cmd file.
    for obj in subprocess.check_output([ar, '-t', archive]).decode().split():
        yield to_cmdfile(obj)
def cmdfiles_for_modorder(modorder):
    """Generate the iterator of .cmd files associated with the modules.order.

    Parse the given modules.order, and yield every .cmd file used to build the
    contained modules.

    Args:
        modorder: The modules.order file to parse

    Yields:
        The path to every .cmd file found
    """
    with open(modorder) as f:
        for line in f:
            ko = line.rstrip()
            base, ext = os.path.splitext(ko)
            if ext != '.ko':
                # NOTE(review): requires ``import sys`` at the top of this
                # file; without it this line raises NameError instead of
                # exiting with the intended message.
                sys.exit('{}: module path must end with .ko'.format(ko))
            mod = base + '.mod'
            # The first line of *.mod lists the objects that compose the module.
            with open(mod) as m:
                for obj in m.readline().split():
                    yield to_cmdfile(obj)
def process_line(root_directory, command_prefix, file_path):
    """Extracts information from a .cmd line and creates an entry from it.

    Args:
        root_directory: The directory that was searched for .cmd files. Usually
            used directly in the "directory" entry in compile_commands.json.
        command_prefix: The extracted command line, up to the last element.
        file_path: The .c file from the end of the extracted command.
            Usually relative to root_directory, but sometimes absolute.

    Returns:
        An entry to append to compile_commands.

    Raises:
        ValueError: Could not find the extracted file based on file_path and
            root_directory or file_directory.
    """
    # Make escapes '#' as '\#' or '$(pound)' depending on the kernel version;
    # compile_commands.json is not read by Make, so restore the literal '#'.
    command = command_prefix.replace('\#', '#').replace('$(pound)', '#')
    # os.path.abspath() also normalizes '.' and '..' components.
    source = os.path.abspath(os.path.join(root_directory, file_path))
    if not os.path.exists(source):
        raise ValueError('File %s not found' % source)
    entry = {
        'directory': root_directory,
        'file': source,
        'command': command + file_path,
    }
    return entry
def main():
    """Walks through the directory and finds and parses .cmd files."""
    log_level, directory, output, ar, paths = parse_arguments()

    level = getattr(logging, log_level)
    logging.basicConfig(format='%(levelname)s: %(message)s', level=level)

    line_matcher = re.compile(_LINE_PATTERN)

    compile_commands = []
    for path in paths:
        # If 'path' is a directory, handle all .cmd files under it.
        # Otherwise, handle .cmd files associated with the file.
        # Most of built-in objects are linked via archives (built-in.a or lib.a)
        # but some objects are linked to vmlinux directly.
        # Modules are listed in modules.order.
        if os.path.isdir(path):
            cmdfiles = cmdfiles_in_dir(path)
        elif path.endswith('.o'):
            cmdfiles = cmdfiles_for_o(path)
        elif path.endswith('.a'):
            cmdfiles = cmdfiles_for_a(path, ar)
        elif path.endswith('modules.order'):
            cmdfiles = cmdfiles_for_modorder(path)
        else:
            sys.exit('{}: unknown file type'.format(path))

        for cmdfile in cmdfiles:
            with open(cmdfile, 'rt') as f:
                # Only the first line of a .cmd file is examined; files whose
                # first line does not match the pattern are skipped silently.
                result = line_matcher.match(f.readline())
                if result:
                    try:
                        entry = process_line(directory, result.group(1),
                                             result.group(2))
                        compile_commands.append(entry)
                    except ValueError as err:
                        # Missing source files are logged but do not abort the run.
                        logging.info('Could not add line from %s: %s',
                                     cmdfile, err)

    with open(output, 'wt') as f:
        json.dump(compile_commands, f, indent=2, sort_keys=True)

if __name__ == '__main__':
    main()
| gpl-2.0 |
mohabusama/django-users-api | users_api/authorization.py | 1 | 1626 | from tastypie.exceptions import Unauthorized
from tastypie.authorization import Authorization, DjangoAuthorization
class UsersDjangoAuthorization(DjangoAuthorization):
    """Standard Django-permission authorization, extended so that a user may
    always update their own user object."""

    def update_detail(self, object_list, bundle):
        """Allow the update when the requester is the target user; otherwise
        fall back to the regular Django permission check."""
        is_self = bundle.request.user.id == bundle.obj.id
        if is_self:
            return True
        parent = super(UsersDjangoAuthorization, self)
        return parent.update_detail(object_list, bundle)
class AdminOnlyAuthorization(Authorization):
    """Tastypie authorization that restricts every operation to superusers."""

    def _is_authorized_list(self, object_list, bundle):
        # List-style checks return the (unfiltered) object list on success
        # and raise Unauthorized otherwise.
        if not bundle.request.user.is_superuser:
            raise Unauthorized('Admin only access.')
        return object_list

    def _is_authorized_detail(self, object_list, bundle):
        # Detail-style checks report access as a boolean.
        return bundle.request.user.is_superuser

    def read_list(self, object_list, bundle):
        return self._is_authorized_list(object_list, bundle)

    def read_detail(self, object_list, bundle):
        return self._is_authorized_detail(object_list, bundle)

    def create_list(self, object_list, bundle):
        return self._is_authorized_list(object_list, bundle)

    def create_detail(self, object_list, bundle):
        return self._is_authorized_detail(object_list, bundle)

    def update_list(self, object_list, bundle):
        return self._is_authorized_list(object_list, bundle)

    def update_detail(self, object_list, bundle):
        return self._is_authorized_detail(object_list, bundle)

    def delete_list(self, object_list, bundle):
        return self._is_authorized_list(object_list, bundle)

    def delete_detail(self, object_list, bundle):
        return self._is_authorized_detail(object_list, bundle)
| mit |
horance-liu/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/benchmarking.py | 67 | 1906 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for benchmarking OpKernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.python.framework import ops
def device(use_gpu=False):
  """TensorFlow device to assign ops to."""
  target = "/gpu:0" if use_gpu else "/cpu:0"
  return ops.device(target)
def seconds_per_run(op, sess, num_runs=50):
  """Number of seconds taken to execute 'op' once on average."""
  # Two untimed warm-up executions amortize one-off setup costs.
  for _ in range(2):
    sess.run(op)

  started = time.time()
  for _ in range(num_runs):
    sess.run(op)
  elapsed = time.time() - started

  return elapsed / num_runs
def dict_product(dicts):
  """Constructs iterator over outer product of entries in a dict-of-lists.

  Example:
    >>> dict_products({"a": [1,2], "b": [3, 4]})
    >>> [{"a": 1, "b": 3},
         {"a": 1, "b": 4},
         {"a": 2, "b": 3},
         {"a": 2, "b": 4}]

  Args:
    dicts: dictionary with string keys and list values.

  Yields:
    Individual dicts from outer product.
  """
  # Split the mapping into parallel key/value-list tuples once, up front.
  keys, values = zip(*dicts.items())
  for combo in itertools.product(*values):
    yield {name: choice for name, choice in zip(keys, combo)}
| apache-2.0 |
viewdy/phantomjs2 | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/message.py | 261 | 9669 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@google.com (Will Robinson)'
# Exception hierarchy for this module.
class Error(Exception): pass          # Base class for protobuf-related errors.
class DecodeError(Error): pass        # Raised on failure to parse serialized data.
class EncodeError(Error): pass        # Raised on failure to serialize a message.
class Message(object):

  """Abstract base class for protocol messages.

  Protocol message classes are almost always generated by the protocol
  compiler. These generated types subclass Message and implement the methods
  shown below.

  TODO(robinson): Link to an HTML document here.

  TODO(robinson): Document that instances of this class will also
  have an Extensions attribute with __getitem__ and __setitem__.
  Again, not sure how to best convey this.

  TODO(robinson): Document that the class must also have a static
  RegisterExtension(extension_field) method.
  Not sure how to best express at this point.
  """

  # TODO(robinson): Document these fields and methods.

  __slots__ = []

  # Message descriptor for this type; generated subclasses fill this in.
  DESCRIPTOR = None

  def __deepcopy__(self, memo=None):
    # A deep copy is a fresh instance of the same type merged with self.
    clone = type(self)()
    clone.MergeFrom(self)
    return clone

  def __eq__(self, other_msg):
    raise NotImplementedError

  def __ne__(self, other_msg):
    # Can't just say self != other_msg, since that would infinitely recurse. :)
    return not self == other_msg

  def __hash__(self):
    raise TypeError('unhashable object')

  def __str__(self):
    raise NotImplementedError

  def __unicode__(self):
    raise NotImplementedError

  def MergeFrom(self, other_msg):
    """Merges the contents of the specified message into current message.

    This method merges the contents of the specified message into the current
    message. Singular fields that are set in the specified message overwrite
    the corresponding fields in the current message. Repeated fields are
    appended. Singular sub-messages and groups are recursively merged.

    Args:
      other_msg: Message to merge into the current message.
    """
    raise NotImplementedError

  def CopyFrom(self, other_msg):
    """Copies the content of the specified message into the current message.

    The method clears the current message and then merges the specified
    message using MergeFrom.

    Args:
      other_msg: Message to copy into the current one.
    """
    # Copying onto itself would clear the message before merging from it.
    if self is other_msg:
      return
    self.Clear()
    self.MergeFrom(other_msg)

  def Clear(self):
    """Clears all data that was set in the message."""
    raise NotImplementedError

  def SetInParent(self):
    """Mark this as present in the parent.

    This normally happens automatically when you assign a field of a
    sub-message, but sometimes you want to make the sub-message
    present while keeping it empty. If you find yourself using this,
    you may want to reconsider your design."""
    raise NotImplementedError

  def IsInitialized(self):
    """Checks if the message is initialized.

    Returns:
      The method returns True if the message is initialized (i.e. all of its
      required fields are set).
    """
    raise NotImplementedError

  # TODO(robinson): MergeFromString() should probably return None and be
  # implemented in terms of a helper that returns the # of bytes read. Our
  # deserialization routines would use the helper when recursively
  # deserializing, but the end user would almost always just want the no-return
  # MergeFromString().

  def MergeFromString(self, serialized):
    """Merges serialized protocol buffer data into this message.

    When we find a field in |serialized| that is already present
    in this message:
      - If it's a "repeated" field, we append to the end of our list.
      - Else, if it's a scalar, we overwrite our field.
      - Else, (it's a nonrepeated composite), we recursively merge
        into the existing composite.

    TODO(robinson): Document handling of unknown fields.

    Args:
      serialized: Any object that allows us to call buffer(serialized)
        to access a string of bytes using the buffer interface.

    TODO(robinson): When we switch to a helper, this will return None.

    Returns:
      The number of bytes read from |serialized|.
      For non-group messages, this will always be len(serialized),
      but for messages which are actually groups, this will
      generally be less than len(serialized), since we must
      stop when we reach an END_GROUP tag. Note that if
      we *do* stop because of an END_GROUP tag, the number
      of bytes returned does not include the bytes
      for the END_GROUP tag information.
    """
    raise NotImplementedError

  def ParseFromString(self, serialized):
    """Like MergeFromString(), except we clear the object first."""
    self.Clear()
    self.MergeFromString(serialized)

  def SerializeToString(self):
    """Serializes the protocol message to a binary string.

    Returns:
      A binary string representation of the message if all of the required
      fields in the message are set (i.e. the message is initialized).

    Raises:
      message.EncodeError if the message isn't initialized.
    """
    raise NotImplementedError

  def SerializePartialToString(self):
    """Serializes the protocol message to a binary string.

    This method is similar to SerializeToString but doesn't check if the
    message is initialized.

    Returns:
      A string representation of the partial message.
    """
    raise NotImplementedError

  # TODO(robinson): Decide whether we like these better
  # than auto-generated has_foo() and clear_foo() methods
  # on the instances themselves. This way is less consistent
  # with C++, but it makes reflection-type access easier and
  # reduces the number of magically autogenerated things.
  #
  # TODO(robinson): Be sure to document (and test) exactly
  # which field names are accepted here. Are we case-sensitive?
  # What do we do with fields that share names with Python keywords
  # like 'lambda' and 'yield'?
  #
  # nnorwitz says:
  # """
  # Typically (in python), an underscore is appended to names that are
  # keywords. So they would become lambda_ or yield_.
  # """

  def ListFields(self):
    """Returns a list of (FieldDescriptor, value) tuples for all
    fields in the message which are not empty. A singular field is non-empty
    if HasField() would return true, and a repeated field is non-empty if
    it contains at least one element. The fields are ordered by field
    number"""
    raise NotImplementedError

  def HasField(self, field_name):
    """Checks if a certain field is set for the message. Note if the
    field_name is not defined in the message descriptor, ValueError will be
    raised."""
    raise NotImplementedError

  def ClearField(self, field_name):
    """Clears the contents of the given field (abstract; see HasField)."""
    raise NotImplementedError

  def HasExtension(self, extension_handle):
    """Checks whether the given extension is present (abstract)."""
    raise NotImplementedError

  def ClearExtension(self, extension_handle):
    """Clears the contents of the given extension (abstract)."""
    raise NotImplementedError

  def ByteSize(self):
    """Returns the serialized size of this message.

    Recursively calls ByteSize() on all contained messages.
    """
    raise NotImplementedError

  def _SetListener(self, message_listener):
    """Internal method used by the protocol message implementation.
    Clients should not call this directly.

    Sets a listener that this message will call on certain state transitions.
    The purpose of this method is to register back-edges from children to
    parents at runtime, for the purpose of setting "has" bits and
    byte-size-dirty bits in the parent and ancestor objects whenever a child or
    descendant object is modified.

    If the client wants to disconnect this Message from the object tree, she
    explicitly sets callback to None.

    If message_listener is None, unregisters any existing listener. Otherwise,
    message_listener must implement the MessageListener interface in
    internal/message_listener.py, and we discard any listener registered
    via a previous _SetListener() call.
    """
    raise NotImplementedError
| bsd-3-clause |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/_pyio.py | 1 | 93037 | """
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import stat
import sys
# Import _thread instead of threading to reduce startup cost
from _thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2}  # Hardwired values
# SEEK_HOLE/SEEK_DATA are optional, platform-dependent whence values.
if hasattr(os, 'SEEK_HOLE') :
    valid_seek_flags.add(os.SEEK_HOLE)
    valid_seek_flags.add(os.SEEK_DATA)

# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes

# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.

# Rebind for compatibility
BlockingIOError = BlockingIOError

# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
# (gettotalrefcount only exists in debug builds; dev mode also opts in.)
_IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
         newline=None, closefd=True, opener=None):

    r"""Open file and return a stream.  Raise OSError upon failure.

    file is either a text or byte string giving the name (and the path
    if the file isn't in the current working directory) of the file to
    be opened or an integer file descriptor of the file to be
    wrapped. (If a file descriptor is given, it is closed when the
    returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file is
    opened. It defaults to 'r' which means open for reading in text mode. Other
    common values are 'w' for writing (truncating the file if it already
    exists), 'x' for exclusive creation of a new file, and 'a' for appending
    (which on some Unix systems, means that all writes append to the end of the
    file regardless of the current seek position). In text mode, if encoding is
    not specified the encoding used is platform dependent. (For reading and
    writing raw bytes use binary mode and leave encoding unspecified.) The
    available modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'x'       create a new file and open it for writing
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (deprecated)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random
    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
    'r+b' opens the file without truncation. The 'x' mode implies 'w' and
    raises an `FileExistsError` if the file already exists.

    Python distinguishes between files opened in binary and text modes,
    even when the underlying operating system doesn't. Files opened in
    binary mode (appending 'b' to the mode argument) return contents as
    bytes objects without any decoding. In text mode (the default, or when
    't' is appended to the mode argument), the contents of the file are
    returned as strings, the bytes having been first decoded using a
    platform-dependent encoding or using the specified encoding if given.

    'U' mode is deprecated and will raise an exception in future versions
    of Python.  It has no effect in Python 3.  Use newline to control
    universal newlines mode.

    buffering is an optional integer used to set the buffering policy.
    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
    line buffering (only usable in text mode), and an integer > 1 to indicate
    the size of a fixed-size chunk buffer.  When no buffering argument is
    given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer
      is chosen using a heuristic trying to determine the underlying device's
      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
      On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True)
      use line buffering.  Other text files use the policy described above
      for binary files.

    encoding is the str name of the encoding used to decode or encode the
    file. This should only be used in text mode. The default encoding is
    platform dependent, but any encoding supported by Python can be
    passed.  See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to
    be handled---this argument should not be used in binary mode. Pass
    'strict' to raise a ValueError exception if there is an encoding error
    (the default of None has the same effect), or pass 'ignore' to ignore
    errors. (Note that ignoring encoding errors can lead to data loss.)
    See the documentation for codecs.register for a list of the permitted
    encoding error strings.

    newline is a string controlling how universal newlines works (it only
    applies to text mode). It can be None, '', '\n', '\r', and '\r\n'.  It works
    as follows:

    * On input, if newline is None, universal newlines mode is
      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
      these are translated into '\n' before being returned to the
      caller. If it is '', universal newline mode is enabled, but line
      endings are returned to the caller untranslated. If it has any of
      the other legal values, input lines are only terminated by the given
      string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are
      translated to the system default line separator, os.linesep. If
      newline is '', no translation takes place. If newline is any of the
      other legal values, any '\n' characters written are translated to
      the given string.

    closedfd is a bool. If closefd is False, the underlying file descriptor will
    be kept open when the file is closed. This does not work when a file name is
    given and must be True in that case.

    The newly created file is non-inheritable.

    A custom opener can be used by passing a callable as *opener*. The
    underlying file descriptor for the file object is then obtained by calling
    *opener* with (*file*, *flags*). *opener* must return an open file
    descriptor (passing os.open as *opener* results in functionality similar to
    passing None).

    open() returns a file object whose type depends on the mode, and
    through which the standard file operations such as reading and writing
    are performed. When open() is used to open a file in a text mode ('w',
    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
    a file in a binary mode, the returned class varies: in read binary
    mode, it returns a BufferedReader; in write binary and append binary
    modes, it returns a BufferedWriter, and in read/write mode, it returns
    a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both
    reading and writing. For strings StringIO can be used like a file
    opened in a text mode, and for bytes a BytesIO can be used like a file
    opened in a binary mode.
    """
    # --- Validate argument types before interpreting the mode string. ---
    if not isinstance(file, int):
        file = os.fspath(file)
    if not isinstance(file, (str, bytes, int)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, str):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, int):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, str):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)
    # The set comparison rejects unknown characters; the length check rejects
    # duplicated mode characters.
    modes = set(mode)
    if modes - set("axrwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    creating = "x" in modes
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if creating or writing or appending or updating:
            raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
        import warnings
        warnings.warn("'U' mode is deprecated",
                      DeprecationWarning, 2)
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if creating + reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (creating or reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    if binary and buffering == 1:
        import warnings
        warnings.warn("line buffering (buffering=1) isn't supported in binary "
                      "mode, the default buffer size will be used",
                      RuntimeWarning, 2)
    # The raw OS-level stream; buffered/text layers are stacked on top below.
    raw = FileIO(file,
                 (creating and "x" or "") +
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd, opener=opener)
    result = raw
    try:
        line_buffering = False
        if buffering == 1 or buffering < 0 and raw.isatty():
            buffering = -1
            line_buffering = True
        if buffering < 0:
            buffering = DEFAULT_BUFFER_SIZE
            try:
                # Prefer the filesystem block size for the buffer when available.
                bs = os.fstat(raw.fileno()).st_blksize
            except (OSError, AttributeError):
                pass
            else:
                if bs > 1:
                    buffering = bs
        if buffering < 0:
            raise ValueError("invalid buffering size")
        if buffering == 0:
            if binary:
                return result
            raise ValueError("can't have unbuffered text I/O")
        if updating:
            buffer = BufferedRandom(raw, buffering)
        elif creating or writing or appending:
            buffer = BufferedWriter(raw, buffering)
        elif reading:
            buffer = BufferedReader(raw, buffering)
        else:
            raise ValueError("unknown mode: %r" % mode)
        result = buffer
        if binary:
            return result
        # Text mode: wrap the buffered stream in a decoding layer.
        text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
        result = text
        text.mode = mode
        return result
    except:
        # Close whichever layer was constructed last so the fd isn't leaked.
        result.close()
        raise
# Define a default pure-Python implementation for open_code()
# that does not allow hooks. Warn on first use. Defined for tests.
def _open_code_with_warning(path):
"""Opens the provided file with mode ``'rb'``. This function
should be used when the intent is to treat the contents as
executable code.
``path`` should be an absolute path.
When supported by the runtime, this function can be hooked
in order to allow embedders more control over code files.
This functionality is not supported on the current runtime.
"""
import warnings
warnings.warn("_pyio.open_code() may not be using hooks",
RuntimeWarning, 2)
return open(path, "rb")
# Prefer the hook-aware io.open_code() when the runtime provides it;
# otherwise fall back to the pure-Python variant that warns on first use.
try:
    open_code = io.open_code
except AttributeError:
    open_code = _open_code_with_warning
class DocDescriptor:
    """Helper for builtins.open.__doc__
    """

    def __get__(self, obj, typ=None):
        # Prepend the canonical signature line to open()'s own docstring.
        signature = ("open(file, mode='r', buffering=-1, encoding=None, "
                     "errors=None, newline=None, closefd=True)\n\n")
        return signature + open.__doc__
class OpenWrapper:
    """Wrapper for builtins.open

    Trick so that open won't become a bound method when stored
    as a class variable (as dbm.dumb does).

    See initstdio() in Python/pylifecycle.c.
    """
    # Descriptor so the wrapper exposes the same __doc__ as open().
    __doc__ = DocDescriptor()

    def __new__(cls, *args, **kwargs):
        # Never actually instantiates; simply forwards to open().
        return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
    UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
    # Fallback definition when the C io module does not provide one.
    class UnsupportedOperation(OSError, ValueError):
        pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too.
Text I/O classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
try:
closed = self.closed
except AttributeError:
# If getting closed fails, then the object is probably
# in an unusable state, so ignore.
return
if closed:
return
if _IOBASE_EMITS_UNRAISABLE:
self.close()
else:
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
else:
try:
size_index = size.__index__
except AttributeError:
raise TypeError(f"{size!r} is not an integer")
else:
size = size_index()
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
def __iter__(self):
    """Iterator protocol: a stream iterates over its lines."""
    self._checkClosed()
    return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
    """Return a list of lines from the stream.

    hint can be specified to control the number of lines read: no more
    lines will be read if the total size (in bytes/characters) of all
    lines so far exceeds hint.
    """
    if hint is None or hint <= 0:
        # No limit requested: exhaust the stream.
        return list(self)
    collected = []
    remaining = hint
    for line in self:
        collected.append(line)
        remaining -= len(line)
        if remaining <= 0:
            break
    return collected
def writelines(self, lines):
    """Write a list of lines to the stream.

    Line separators are not added, so it is usual for each of the lines
    provided to have a line separator at the end.
    """
    self._checkClosed()
    # Delegates to write() per item; no buffering/joining is done here.
    for line in lines:
        self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):

    """Base class for raw binary I/O."""

    # The read() method is implemented by calling readinto(); derived
    # classes that want to support read() only need to implement
    # readinto() as a primitive operation.  In general, readinto() can be
    # more efficient than read().

    # (It would be tempting to also provide an implementation of
    # readinto() in terms of read(), in case the latter is a more suitable
    # primitive operation, but that would lead to nasty recursion in case
    # a subclass doesn't implement either.)

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        Returns an empty bytes object on EOF, or None if the object is
        set not to block and has no data to read.
        """
        if size is None:
            size = -1
        if size < 0:
            return self.readall()
        # Delegate to readinto() via a pre-sized buffer; __index__
        # coerces integer-like sizes.
        b = bytearray(size.__index__())
        n = self.readinto(b)
        if n is None:
            # Non-blocking stream with no data available.
            return None
        # Trim to the number of bytes actually read.
        del b[n:]
        return bytes(b)

    def readall(self):
        """Read until EOF, using multiple read() call."""
        res = bytearray()
        while True:
            data = self.read(DEFAULT_BUFFER_SIZE)
            if not data:
                break
            res += data
        if res:
            return bytes(res)
        else:
            # b'' or None — preserve the distinction between EOF (b'')
            # and "would block" (None) reported by the last read().
            return data

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Returns an int representing the number of bytes read (0 for EOF), or
        None if the object is set not to block and has no data to read.
        """
        self._unsupported("readinto")

    def write(self, b):
        """Write the given buffer to the IO stream.

        Returns the number of bytes written, which may be less than the
        length of b in bytes.
        """
        self._unsupported("write")
# Mirror-register with the C implementation, then register the
# C-accelerated FileIO as a virtual subclass of this ABC.
io.RawIOBase.register(RawIOBase)

from _io import FileIO
# NOTE(review): a pure-Python FileIO class is defined later in this file,
# which rebinds this imported name at module level — confirm which binding
# is intended to win.
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):

    """Base class for buffered IO objects.

    The main difference with RawIOBase is that the read() method
    supports omitting the size argument, and does not have a default
    implementation that defers to readinto().

    In addition, read(), readinto() and write() may raise
    BlockingIOError if the underlying raw stream is in non-blocking
    mode and not ready; unlike their raw counterparts, they will never
    return None.

    A typical implementation should not inherit from a RawIOBase
    implementation, but wrap one.
    """

    def read(self, size=-1):
        """Read and return up to size bytes, where size is an int.

        If the argument is omitted, None, or negative, reads and
        returns all data until EOF.

        If the argument is positive, and the underlying raw stream is
        not 'interactive', multiple raw reads may be issued to satisfy
        the byte count (unless EOF is reached first).  But for
        interactive raw streams (XXX and for pipes?), at most one raw
        read will be issued, and a short result does not imply that
        EOF is imminent.

        Returns an empty bytes array on EOF.

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        self._unsupported("read")

    def read1(self, size=-1):
        """Read up to size bytes with at most one read() system call,
        where size is an int.
        """
        self._unsupported("read1")

    def readinto(self, b):
        """Read bytes into a pre-allocated bytes-like object b.

        Like read(), this may issue multiple reads to the underlying raw
        stream, unless the latter is 'interactive'.

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        return self._readinto(b, read1=False)

    def readinto1(self, b):
        """Read bytes into buffer *b*, using at most one system call

        Returns an int representing the number of bytes read (0 for EOF).

        Raises BlockingIOError if the underlying raw stream has no
        data at the moment.
        """
        return self._readinto(b, read1=True)

    def _readinto(self, b, read1):
        # Shared implementation for readinto()/readinto1(): fetch with
        # read()/read1() and copy the result into the caller's buffer.
        if not isinstance(b, memoryview):
            b = memoryview(b)
        # Flat unsigned-byte view so slice assignment of bytes works.
        b = b.cast('B')
        if read1:
            data = self.read1(len(b))
        else:
            data = self.read(len(b))
        n = len(data)
        b[:n] = data
        return n

    def write(self, b):
        """Write the given bytes buffer to the IO stream.

        Return the number of bytes written, which is always the length of b
        in bytes.

        Raises BlockingIOError if the buffer is full and the
        underlying raw stream cannot accept more data at the moment.
        """
        self._unsupported("write")

    def detach(self):
        """
        Separate the underlying raw stream from the buffer and return it.

        After the raw stream has been detached, the buffer is in an unusable
        state.
        """
        self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):

    """A mixin implementation of BufferedIOBase with an underlying raw stream.

    This passes most requests on to the underlying raw stream.  It
    does *not* provide implementations of read(), readinto() or
    write().
    """

    def __init__(self, raw):
        # The wrapped raw stream; exposed read-only via the 'raw' property
        # and set to None by detach().
        self._raw = raw

    ### Positioning ###

    def seek(self, pos, whence=0):
        new_position = self.raw.seek(pos, whence)
        if new_position < 0:
            raise OSError("seek() returned an invalid position")
        return new_position

    def tell(self):
        pos = self.raw.tell()
        if pos < 0:
            raise OSError("tell() returned an invalid position")
        return pos

    def truncate(self, pos=None):
        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
        # and a flush may be necessary to synch both views of the current
        # file state.
        self.flush()

        if pos is None:
            pos = self.tell()
        # XXX: Should seek() be used, instead of passing the position
        # XXX  directly to truncate?
        return self.raw.truncate(pos)

    ### Flush and close ###

    def flush(self):
        if self.closed:
            raise ValueError("flush on closed file")
        self.raw.flush()

    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                # The raw stream is closed even when the flush fails.
                self.raw.close()

    def detach(self):
        if self.raw is None:
            raise ValueError("raw stream already detached")
        self.flush()
        raw = self._raw
        self._raw = None
        return raw

    ### Inquiries ###

    def seekable(self):
        return self.raw.seekable()

    @property
    def raw(self):
        return self._raw

    @property
    def closed(self):
        return self.raw.closed

    @property
    def name(self):
        return self.raw.name

    @property
    def mode(self):
        return self.raw.mode

    def __getstate__(self):
        # Buffered streams hold OS-level state and are not picklable.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")

    def __repr__(self):
        modname = self.__class__.__module__
        clsname = self.__class__.__qualname__
        try:
            name = self.name
        except AttributeError:
            # Not all raw streams expose a 'name'.
            return "<{}.{}>".format(modname, clsname)
        else:
            return "<{}.{} name={!r}>".format(modname, clsname, name)

    ### Lower-level APIs ###

    def fileno(self):
        return self.raw.fileno()

    def isatty(self):
        return self.raw.isatty()
class BytesIO(BufferedIOBase):

    """Buffered I/O implementation using an in-memory bytes buffer."""

    # Initialize _buffer as soon as possible since it's used by __del__()
    # which calls close()
    _buffer = None

    def __init__(self, initial_bytes=None):
        # The backing store is a plain bytearray; _pos is the current
        # stream offset within it.
        buf = bytearray()
        if initial_bytes is not None:
            buf += initial_bytes
        self._buffer = buf
        self._pos = 0

    def __getstate__(self):
        if self.closed:
            raise ValueError("__getstate__ on closed file")
        return self.__dict__.copy()

    def getvalue(self):
        """Return the bytes value (contents) of the buffer
        """
        if self.closed:
            raise ValueError("getvalue on closed file")
        return bytes(self._buffer)

    def getbuffer(self):
        """Return a readable and writable view of the buffer.
        """
        if self.closed:
            raise ValueError("getbuffer on closed file")
        return memoryview(self._buffer)

    def close(self):
        # Release the memory eagerly; close() may be called more than once.
        if self._buffer is not None:
            self._buffer.clear()
        super().close()

    def read(self, size=-1):
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        else:
            # Accept any integer-like object via __index__.
            try:
                size_index = size.__index__
            except AttributeError:
                raise TypeError(f"{size!r} is not an integer")
            else:
                size = size_index()
        if size < 0:
            size = len(self._buffer)
        if len(self._buffer) <= self._pos:
            # At or past EOF (seeking past the end is allowed).
            return b""
        newpos = min(len(self._buffer), self._pos + size)
        b = self._buffer[self._pos : newpos]
        self._pos = newpos
        return bytes(b)

    def read1(self, size=-1):
        """This is the same as read.
        """
        return self.read(size)

    def write(self, b):
        if self.closed:
            raise ValueError("write to closed file")
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with memoryview(b) as view:
            n = view.nbytes  # Size of any bytes-like object
        if n == 0:
            return 0
        pos = self._pos
        if pos > len(self._buffer):
            # Inserts null bytes between the current end of the file
            # and the new write position.
            padding = b'\x00' * (pos - len(self._buffer))
            self._buffer += padding
        self._buffer[pos:pos + n] = b
        self._pos += n
        return n

    def seek(self, pos, whence=0):
        if self.closed:
            raise ValueError("seek on closed file")
        try:
            pos_index = pos.__index__
        except AttributeError:
            raise TypeError(f"{pos!r} is not an integer")
        else:
            pos = pos_index()
        if whence == 0:
            # Absolute: negative is an error.
            if pos < 0:
                raise ValueError("negative seek position %r" % (pos,))
            self._pos = pos
        elif whence == 1:
            # Relative seeks clamp at 0 rather than raising.
            self._pos = max(0, self._pos + pos)
        elif whence == 2:
            self._pos = max(0, len(self._buffer) + pos)
        else:
            raise ValueError("unsupported whence value")
        return self._pos

    def tell(self):
        if self.closed:
            raise ValueError("tell on closed file")
        return self._pos

    def truncate(self, pos=None):
        if self.closed:
            raise ValueError("truncate on closed file")
        if pos is None:
            pos = self._pos
        else:
            try:
                pos_index = pos.__index__
            except AttributeError:
                raise TypeError(f"{pos!r} is not an integer")
            else:
                pos = pos_index()
            if pos < 0:
                raise ValueError("negative truncate position %r" % (pos,))
        # Note: the stream position is NOT moved by truncate().
        del self._buffer[pos:]
        return pos

    def readable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def writable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def seekable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True
class BufferedReader(_BufferedIOMixin):

    """BufferedReader(raw[, buffer_size])

    A buffer for a readable, sequential BaseRawIO object.

    The constructor creates a BufferedReader for the given readable raw
    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
    is used.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        """Create a new buffered reader using the given readable raw IO object.
        """
        if not raw.readable():
            raise OSError('"raw" argument must be readable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        self._reset_read_buf()
        # Serializes access to the internal read buffer.
        self._read_lock = Lock()

    def readable(self):
        return self.raw.readable()

    def _reset_read_buf(self):
        # _read_buf holds data fetched from raw; _read_pos is the offset of
        # the first unconsumed byte within it.
        self._read_buf = b""
        self._read_pos = 0

    def read(self, size=None):
        """Read size bytes.

        Returns exactly size bytes of data unless the underlying raw IO
        stream reaches EOF or if the call would block in non-blocking
        mode. If size is negative, read until EOF or until read() would
        block.
        """
        if size is not None and size < -1:
            raise ValueError("invalid number of bytes to read")
        with self._read_lock:
            return self._read_unlocked(size)

    def _read_unlocked(self, n=None):
        # Caller must hold self._read_lock.
        nodata_val = b""
        empty_values = (b"", None)
        buf = self._read_buf
        pos = self._read_pos

        # Special case for when the number of bytes to read is unspecified.
        if n is None or n == -1:
            self._reset_read_buf()
            if hasattr(self.raw, 'readall'):
                chunk = self.raw.readall()
                if chunk is None:
                    # Propagate "would block" unless we had buffered data.
                    return buf[pos:] or None
                else:
                    return buf[pos:] + chunk
            chunks = [buf[pos:]]  # Strip the consumed bytes.
            current_size = 0
            while True:
                # Read until EOF or until read() would block.
                chunk = self.raw.read()
                if chunk in empty_values:
                    nodata_val = chunk
                    break
                current_size += len(chunk)
                chunks.append(chunk)
            return b"".join(chunks) or nodata_val

        # The number of bytes to read is specified, return at most n bytes.
        avail = len(buf) - pos  # Length of the available buffered data.
        if n <= avail:
            # Fast path: the data to read is fully buffered.
            self._read_pos += n
            return buf[pos:pos+n]
        # Slow path: read from the stream until enough bytes are read,
        # or until an EOF occurs or until read() would block.
        chunks = [buf[pos:]]
        wanted = max(self.buffer_size, n)
        while avail < n:
            chunk = self.raw.read(wanted)
            if chunk in empty_values:
                nodata_val = chunk
                break
            avail += len(chunk)
            chunks.append(chunk)
        # n is more than avail only when an EOF occurred or when
        # read() would have blocked.
        n = min(n, avail)
        out = b"".join(chunks)
        self._read_buf = out[n:]  # Save the extra data in the buffer.
        self._read_pos = 0
        return out[:n] if out else nodata_val

    def peek(self, size=0):
        """Returns buffered bytes without advancing the position.

        The argument indicates a desired minimal number of bytes; we
        do at most one raw read to satisfy it.  We never return more
        than self.buffer_size.
        """
        with self._read_lock:
            return self._peek_unlocked(size)

    def _peek_unlocked(self, n=0):
        # Caller must hold self._read_lock.
        want = min(n, self.buffer_size)
        have = len(self._read_buf) - self._read_pos
        if have < want or have <= 0:
            to_read = self.buffer_size - have
            current = self.raw.read(to_read)
            if current:
                self._read_buf = self._read_buf[self._read_pos:] + current
                self._read_pos = 0
        return self._read_buf[self._read_pos:]

    def read1(self, size=-1):
        """Reads up to size bytes, with at most one read() system call."""
        # Returns up to size bytes.  If at least one byte is buffered, we
        # only return buffered bytes.  Otherwise, we do one raw read.
        if size < 0:
            size = self.buffer_size
        if size == 0:
            return b""
        with self._read_lock:
            self._peek_unlocked(1)
            return self._read_unlocked(
                min(size, len(self._read_buf) - self._read_pos))

    # Implementing readinto() and readinto1() is not strictly necessary (we
    # could rely on the base class that provides an implementation in terms of
    # read() and read1()). We do it anyway to keep the _pyio implementation
    # similar to the io implementation (which implements the methods for
    # performance reasons).
    def _readinto(self, buf, read1):
        """Read data into *buf* with at most one system call."""

        # Need to create a memoryview object of type 'b', otherwise
        # we may not be able to assign bytes to it, and slicing it
        # would create a new object.
        if not isinstance(buf, memoryview):
            buf = memoryview(buf)
        if buf.nbytes == 0:
            return 0
        buf = buf.cast('B')

        written = 0
        with self._read_lock:
            while written < len(buf):

                # First try to read from internal buffer
                avail = min(len(self._read_buf) - self._read_pos, len(buf))
                if avail:
                    buf[written:written+avail] = \
                        self._read_buf[self._read_pos:self._read_pos+avail]
                    self._read_pos += avail
                    written += avail
                    if written == len(buf):
                        break

                # If remaining space in callers buffer is larger than
                # internal buffer, read directly into callers buffer
                if len(buf) - written > self.buffer_size:
                    n = self.raw.readinto(buf[written:])
                    if not n:
                        break  # eof
                    written += n

                # Otherwise refill internal buffer - unless we're
                # in read1 mode and already got some data
                elif not (read1 and written):
                    if not self._peek_unlocked(1):
                        break  # eof

                # In readinto1 mode, return as soon as we have some data
                if read1 and written:
                    break

        return written

    def tell(self):
        # Raw position minus the amount of buffered-but-unconsumed data.
        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._read_lock:
            if whence == 1:
                # Adjust a relative seek for data already buffered.
                pos -= len(self._read_buf) - self._read_pos
            pos = _BufferedIOMixin.seek(self, pos, whence)
            self._reset_read_buf()
            return pos
class BufferedWriter(_BufferedIOMixin):

    """A buffer for a writeable sequential RawIO object.

    The constructor creates a BufferedWriter for the given writeable raw
    stream. If the buffer_size is not given, it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        if not raw.writable():
            raise OSError('"raw" argument must be writable.')

        _BufferedIOMixin.__init__(self, raw)
        if buffer_size <= 0:
            raise ValueError("invalid buffer size")
        self.buffer_size = buffer_size
        # Bytes accepted by write() but not yet pushed to the raw stream.
        self._write_buf = bytearray()
        self._write_lock = Lock()

    def writable(self):
        return self.raw.writable()

    def write(self, b):
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")
        with self._write_lock:
            if self.closed:
                raise ValueError("write to closed file")
            # XXX we can implement some more tricks to try and avoid
            # partial writes
            if len(self._write_buf) > self.buffer_size:
                # We're full, so let's pre-flush the buffer.  (This may
                # raise BlockingIOError with characters_written == 0.)
                self._flush_unlocked()
            before = len(self._write_buf)
            self._write_buf.extend(b)
            written = len(self._write_buf) - before
            if len(self._write_buf) > self.buffer_size:
                try:
                    self._flush_unlocked()
                except BlockingIOError as e:
                    if len(self._write_buf) > self.buffer_size:
                        # We've hit the buffer_size. We have to accept a
                        # partial write and cut back our buffer.
                        overage = len(self._write_buf) - self.buffer_size
                        written -= overage
                        self._write_buf = self._write_buf[:self.buffer_size]
                        raise BlockingIOError(e.errno, e.strerror, written)
            return written

    def truncate(self, pos=None):
        with self._write_lock:
            self._flush_unlocked()
            if pos is None:
                pos = self.raw.tell()
            return self.raw.truncate(pos)

    def flush(self):
        with self._write_lock:
            self._flush_unlocked()

    def _flush_unlocked(self):
        # Caller must hold self._write_lock.
        if self.closed:
            raise ValueError("flush on closed file")
        while self._write_buf:
            try:
                n = self.raw.write(self._write_buf)
            except BlockingIOError:
                raise RuntimeError("self.raw should implement RawIOBase: it "
                                   "should not raise BlockingIOError")
            if n is None:
                # Raw streams signal "would block" by returning None;
                # buffered streams must convert that to an exception.
                raise BlockingIOError(
                    errno.EAGAIN,
                    "write could not complete without blocking", 0)
            if n > len(self._write_buf) or n < 0:
                raise OSError("write() returned incorrect number of bytes")
            del self._write_buf[:n]

    def tell(self):
        # Raw position plus bytes still sitting in the write buffer.
        return _BufferedIOMixin.tell(self) + len(self._write_buf)

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        with self._write_lock:
            self._flush_unlocked()
            return _BufferedIOMixin.seek(self, pos, whence)

    def close(self):
        with self._write_lock:
            if self.raw is None or self.closed:
                return
        # We have to release the lock and call self.flush() (which will
        # probably just re-take the lock) in case flush has been overridden in
        # a subclass or the user set self.flush to something. This is the same
        # behavior as the C implementation.
        try:
            # may raise BlockingIOError or BrokenPipeError etc
            self.flush()
        finally:
            with self._write_lock:
                self.raw.close()
class BufferedRWPair(BufferedIOBase):

    """A buffered reader and writer object together.

    A buffered reader object and buffered writer object put together to
    form a sequential IO object that can read and write. This is typically
    used with a socket or two-way pipe.

    reader and writer are RawIOBase objects that are readable and
    writeable respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
    """

    # XXX The usefulness of this (compared to having two separate IO
    # objects) is questionable.

    def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
        """Constructor.

        The arguments are two RawIO instances.
        """
        if not reader.readable():
            raise OSError('"reader" argument must be readable.')

        if not writer.writable():
            raise OSError('"writer" argument must be writable.')

        self.reader = BufferedReader(reader, buffer_size)
        self.writer = BufferedWriter(writer, buffer_size)

    # All read-side methods delegate to the reader, write-side to the writer.

    def read(self, size=-1):
        if size is None:
            size = -1
        return self.reader.read(size)

    def readinto(self, b):
        return self.reader.readinto(b)

    def write(self, b):
        return self.writer.write(b)

    def peek(self, size=0):
        return self.reader.peek(size)

    def read1(self, size=-1):
        return self.reader.read1(size)

    def readinto1(self, b):
        return self.reader.readinto1(b)

    def readable(self):
        return self.reader.readable()

    def writable(self):
        return self.writer.writable()

    def flush(self):
        return self.writer.flush()

    def close(self):
        try:
            self.writer.close()
        finally:
            # The reader is closed even if closing the writer fails.
            self.reader.close()

    def isatty(self):
        return self.reader.isatty() or self.writer.isatty()

    @property
    def closed(self):
        # NOTE(review): only the writer's state is reported here, so a
        # closed reader alone is not reflected — confirm this mirrors the
        # intended (C implementation) behaviour.
        return self.writer.closed
class BufferedRandom(BufferedWriter, BufferedReader):

    """A buffered interface to random access streams.

    The constructor creates a reader and writer for a seekable stream,
    raw, given in the first argument. If the buffer_size is omitted it
    defaults to DEFAULT_BUFFER_SIZE.
    """

    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
        raw._checkSeekable()
        # Initialize both halves on the same raw stream.
        BufferedReader.__init__(self, raw, buffer_size)
        BufferedWriter.__init__(self, raw, buffer_size)

    def seek(self, pos, whence=0):
        if whence not in valid_seek_flags:
            raise ValueError("invalid whence value")
        # Flush pending writes before repositioning.
        self.flush()
        if self._read_buf:
            # Undo read ahead.
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
        # First do the raw seek, then empty the read buffer, so that
        # if the raw seek fails, we don't lose buffered data forever.
        pos = self.raw.seek(pos, whence)
        with self._read_lock:
            self._reset_read_buf()
        if pos < 0:
            raise OSError("seek() returned invalid position")
        return pos

    def tell(self):
        # Whichever buffer is active determines the effective position.
        if self._write_buf:
            return BufferedWriter.tell(self)
        else:
            return BufferedReader.tell(self)

    def truncate(self, pos=None):
        if pos is None:
            pos = self.tell()
        # Use seek to flush the read buffer.
        return BufferedWriter.truncate(self, pos)

    def read(self, size=None):
        if size is None:
            size = -1
        # Pending writes must reach the raw stream before reading.
        self.flush()
        return BufferedReader.read(self, size)

    def readinto(self, b):
        self.flush()
        return BufferedReader.readinto(self, b)

    def peek(self, size=0):
        self.flush()
        return BufferedReader.peek(self, size)

    def read1(self, size=-1):
        self.flush()
        return BufferedReader.read1(self, size)

    def readinto1(self, b):
        self.flush()
        return BufferedReader.readinto1(self, b)

    def write(self, b):
        if self._read_buf:
            # Undo readahead
            with self._read_lock:
                self.raw.seek(self._read_pos - len(self._read_buf), 1)
                self._reset_read_buf()
        return BufferedWriter.write(self, b)
class FileIO(RawIOBase):

    # Class-level defaults so __del__/__repr__ are safe even if __init__
    # failed partway through.
    _fd = -1
    _created = False
    _readable = False
    _writable = False
    _appending = False
    _seekable = None
    _closefd = True

    def __init__(self, file, mode='r', closefd=True, opener=None):
        """Open a file.  The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
        writing, exclusive creation or appending.  The file will be created if it
        doesn't exist when opened for writing or appending; it will be truncated
        when opened for writing.  A FileExistsError will be raised if it already
        exists when opened for creating. Opening a file for creating implies
        writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
        to allow simultaneous reading and writing. A custom opener can be used by
        passing a callable as *opener*. The underlying file descriptor for the file
        object is then obtained by calling opener with (*name*, *flags*).
        *opener* must return an open file descriptor (passing os.open as *opener*
        results in functionality similar to passing None).
        """
        if self._fd >= 0:
            # Have to close the existing file first.
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                self._fd = -1

        if isinstance(file, float):
            raise TypeError('integer argument expected, got float')
        if isinstance(file, int):
            # An existing descriptor was passed in directly.
            fd = file
            if fd < 0:
                raise ValueError('negative file descriptor')
        else:
            fd = -1

        if not isinstance(mode, str):
            raise TypeError('invalid mode: %s' % (mode,))
        if not set(mode) <= set('xrwab+'):
            raise ValueError('invalid mode: %s' % (mode,))
        if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
            raise ValueError('Must have exactly one of create/read/write/append '
                             'mode and at most one plus')

        # Translate the mode string into open(2) flags and access booleans.
        if 'x' in mode:
            self._created = True
            self._writable = True
            flags = os.O_EXCL | os.O_CREAT
        elif 'r' in mode:
            self._readable = True
            flags = 0
        elif 'w' in mode:
            self._writable = True
            flags = os.O_CREAT | os.O_TRUNC
        elif 'a' in mode:
            self._writable = True
            self._appending = True
            flags = os.O_APPEND | os.O_CREAT

        if '+' in mode:
            self._readable = True
            self._writable = True

        if self._readable and self._writable:
            flags |= os.O_RDWR
        elif self._readable:
            flags |= os.O_RDONLY
        else:
            flags |= os.O_WRONLY

        flags |= getattr(os, 'O_BINARY', 0)

        noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
                          getattr(os, 'O_CLOEXEC', 0))
        flags |= noinherit_flag

        # Track a descriptor we opened ourselves so it can be closed on error.
        owned_fd = None
        try:
            if fd < 0:
                if not closefd:
                    raise ValueError('Cannot use closefd=False with file name')
                if opener is None:
                    fd = os.open(file, flags, 0o666)
                else:
                    fd = opener(file, flags)
                    if not isinstance(fd, int):
                        raise TypeError('expected integer from opener')
                    if fd < 0:
                        raise OSError('Negative file descriptor')
                owned_fd = fd
                if not noinherit_flag:
                    # No atomic close-on-exec flag available: set it after.
                    os.set_inheritable(fd, False)

            self._closefd = closefd
            fdfstat = os.fstat(fd)
            try:
                if stat.S_ISDIR(fdfstat.st_mode):
                    raise IsADirectoryError(errno.EISDIR,
                                            os.strerror(errno.EISDIR), file)
            except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
                # don't exist.
                pass
            self._blksize = getattr(fdfstat, 'st_blksize', 0)
            if self._blksize <= 1:
                self._blksize = DEFAULT_BUFFER_SIZE

            if _setmode:
                # don't translate newlines (\r\n <=> \n)
                _setmode(fd, os.O_BINARY)

            self.name = file
            if self._appending:
                # For consistent behaviour, we explicitly seek to the
                # end of file (otherwise, it might be done only on the
                # first write()).
                os.lseek(fd, 0, SEEK_END)
        except:
            if owned_fd is not None:
                os.close(owned_fd)
            raise
        self._fd = fd

    def __del__(self):
        if self._fd >= 0 and self._closefd and not self.closed:
            # Warn about reliance on the garbage collector for cleanup.
            import warnings
            warnings.warn('unclosed file %r' % (self,), ResourceWarning,
                          stacklevel=2, source=self)
            self.close()

    def __getstate__(self):
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")

    def __repr__(self):
        class_name = '%s.%s' % (self.__class__.__module__,
                                self.__class__.__qualname__)
        if self.closed:
            return '<%s [closed]>' % class_name
        try:
            name = self.name
        except AttributeError:
            return ('<%s fd=%d mode=%r closefd=%r>' %
                    (class_name, self._fd, self.mode, self._closefd))
        else:
            return ('<%s name=%r mode=%r closefd=%r>' %
                    (class_name, name, self.mode, self._closefd))

    def _checkReadable(self):
        if not self._readable:
            raise UnsupportedOperation('File not open for reading')

    def _checkWritable(self, msg=None):
        # NOTE(review): the *msg* parameter is accepted but never used in
        # the raised message — confirm whether any caller relies on it.
        if not self._writable:
            raise UnsupportedOperation('File not open for writing')

    def read(self, size=None):
        """Read at most size bytes, returned as bytes.

        Only makes one system call, so less data may be returned than requested
        In non-blocking mode, returns None if no data is available.
        Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        if size is None or size < 0:
            return self.readall()
        try:
            return os.read(self._fd, size)
        except BlockingIOError:
            return None

    def readall(self):
        """Read all data from the file, returned as bytes.

        In non-blocking mode, returns as much as is immediately available,
        or None if no data is available.  Return an empty bytes object at EOF.
        """
        self._checkClosed()
        self._checkReadable()
        bufsize = DEFAULT_BUFFER_SIZE
        try:
            # Pre-size the buffer from the remaining file size (+1 so a
            # single extra read detects EOF).
            pos = os.lseek(self._fd, 0, SEEK_CUR)
            end = os.fstat(self._fd).st_size
            if end >= pos:
                bufsize = end - pos + 1
        except OSError:
            pass

        result = bytearray()
        while True:
            if len(result) >= bufsize:
                # File grew while reading: enlarge the target size.
                bufsize = len(result)
                bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
            n = bufsize - len(result)
            try:
                chunk = os.read(self._fd, n)
            except BlockingIOError:
                if result:
                    break
                return None
            if not chunk:  # reached the end of the file
                break
            result += chunk

        return bytes(result)

    def readinto(self, b):
        """Same as RawIOBase.readinto()."""
        m = memoryview(b).cast('B')
        data = self.read(len(m))
        n = len(data)
        m[:n] = data
        return n

    def write(self, b):
        """Write bytes b to file, return number written.

        Only makes one system call, so not all of the data may be written.
        The number of bytes actually written is returned.  In non-blocking
        mode, returns None if the write would block.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return os.write(self._fd, b)
        except BlockingIOError:
            return None

    def seek(self, pos, whence=SEEK_SET):
        """Move to new file position.

        Argument offset is a byte count.  Optional argument whence defaults to
        SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
        are SEEK_CUR or 1 (move relative to current position, positive or negative),
        and SEEK_END or 2 (move relative to end of file, usually negative, although
        many platforms allow seeking beyond the end of a file).

        Note that not all file objects are seekable.
        """
        if isinstance(pos, float):
            raise TypeError('an integer is required')
        self._checkClosed()
        return os.lseek(self._fd, pos, whence)

    def tell(self):
        """tell() -> int.  Current file position.

        Can raise OSError for non seekable files."""
        self._checkClosed()
        return os.lseek(self._fd, 0, SEEK_CUR)

    def truncate(self, size=None):
        """Truncate the file to at most size bytes.

        Size defaults to the current file position, as returned by tell().
        The current file position is changed to the value of size.
        """
        self._checkClosed()
        self._checkWritable()
        if size is None:
            size = self.tell()
        os.ftruncate(self._fd, size)
        return size

    def close(self):
        """Close the file.

        A closed file cannot be used for further I/O operations.  close() may be
        called more than once without error.
        """
        if not self.closed:
            try:
                if self._closefd:
                    os.close(self._fd)
            finally:
                # Mark closed even if os.close() raised.
                super().close()

    def seekable(self):
        """True if file supports random-access."""
        self._checkClosed()
        if self._seekable is None:
            # Probe once and cache the answer.
            try:
                self.tell()
            except OSError:
                self._seekable = False
            else:
                self._seekable = True
        return self._seekable

    def readable(self):
        """True if file was opened in a read mode."""
        self._checkClosed()
        return self._readable

    def writable(self):
        """True if file was opened in a write mode."""
        self._checkClosed()
        return self._writable

    def fileno(self):
        """Return the underlying file descriptor (an integer)."""
        self._checkClosed()
        return self._fd

    def isatty(self):
        """True if the file is connected to a TTY device."""
        self._checkClosed()
        return os.isatty(self._fd)

    @property
    def closefd(self):
        """True if the file descriptor will be closed by close()."""
        return self._closefd

    @property
    def mode(self):
        """String giving the file mode"""
        # Reconstructed from the access booleans set in __init__.
        if self._created:
            if self._readable:
                return 'xb+'
            else:
                return 'xb'
        elif self._appending:
            if self._readable:
                return 'ab+'
            else:
                return 'ab'
        elif self._readable:
            if self._writable:
                return 'rb+'
            else:
                return 'rb'
        else:
            return 'wb'
class TextIOBase(IOBase):

    """Base class for text I/O.

    This class provides a character and line based interface to stream
    I/O. There is no public constructor.
    """

    def read(self, size=-1):
        """Read at most size characters from stream, where size is an int.

        Read from underlying buffer until we have size characters or we hit EOF.
        If size is negative or omitted, read until EOF.

        Returns a string.
        """
        self._unsupported("read")

    def write(self, s):
        """Write string s to stream and returning an int."""
        self._unsupported("write")

    def truncate(self, pos=None):
        """Truncate size to pos, where pos is an int."""
        self._unsupported("truncate")

    def readline(self):
        """Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    def detach(self):
        """
        Separate the underlying buffer from the TextIOBase and return it.

        After the underlying buffer has been detached, the TextIO is in an
        unusable state.
        """
        self._unsupported("detach")

    @property
    def encoding(self):
        """Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None

    @property
    def errors(self):
        """Error setting of the decoder or encoder.

        Subclasses should override."""
        return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode.  It wraps
    another incremental decoder, translating \r\n and \r into \n.  It also
    records the types of newlines encountered.  When used with
    translate=False, it ensures that the newline sequence is returned in
    one piece.
    """

    # Bits of self.seennl recording which newline kinds have been read.
    _LF = 1
    _CR = 2
    _CRLF = 4

    def __init__(self, decoder, translate, errors='strict'):
        super().__init__(errors=errors)
        self.decoder = decoder        # wrapped decoder, or None for str input
        self.translate = translate    # rewrite endings to '\n' when True
        self.seennl = 0               # bitmask of newline kinds seen so far
        self.pendingcr = False        # a trailing '\r' is being held back

    def decode(self, input, final=False):
        """Decode *input*, tracking and optionally translating newlines."""
        # Run the wrapped decoder first (or pass text straight through).
        decoded = (input if self.decoder is None
                   else self.decoder.decode(input, final=final))

        # Re-attach a '\r' held back by the previous call.
        if self.pendingcr and (decoded or final):
            decoded = "\r" + decoded
            self.pendingcr = False

        # Retain a last '\r' even when not translating data: readline()
        # is then sure to receive '\r\n' in one piece.
        if not final and decoded.endswith("\r"):
            decoded = decoded[:-1]
            self.pendingcr = True

        # Record which newline kinds appear in this chunk.
        crlf_count = decoded.count('\r\n')
        cr_count = decoded.count('\r') - crlf_count
        lf_count = decoded.count('\n') - crlf_count
        self.seennl |= ((lf_count and self._LF) | (cr_count and self._CR)
                        | (crlf_count and self._CRLF))

        if self.translate:
            if crlf_count:
                decoded = decoded.replace("\r\n", "\n")
            if cr_count:
                decoded = decoded.replace("\r", "\n")
        return decoded

    def getstate(self):
        """Return (buffered_bytes, flags); bit 0 of flags is pendingcr."""
        if self.decoder is None:
            buf, flag = b"", 0
        else:
            buf, flag = self.decoder.getstate()
        flag = (flag << 1) | (1 if self.pendingcr else 0)
        return buf, flag

    def setstate(self, state):
        """Restore state captured by getstate()."""
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        if self.decoder is not None:
            self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        """Forget seen newlines, any pending '\r', and the inner state."""
        self.seennl = 0
        self.pendingcr = False
        if self.decoder is not None:
            self.decoder.reset()

    @property
    def newlines(self):
        """Newline kinds seen so far: None, a str, or a tuple of strs."""
        # Indexed by the seennl bitmask (LF=1, CR=2, CRLF=4).
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n"),
                )[self.seennl]
class TextIOWrapper(TextIOBase):

    r"""Character and line based layer over a BufferedIOBase object, buffer.

    encoding gives the name of the encoding that the stream will be
    decoded or encoded with. It defaults to locale.getpreferredencoding(False).

    errors determines the strictness of encoding and decoding (see the
    codecs.register) and defaults to "strict".

    newline can be None, '', '\n', '\r', or '\r\n'.  It controls the
    handling of line endings. If it is None, universal newlines is
    enabled.  With this enabled, on input, the lines endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
    legal values, that newline becomes the newline when the file is read
    and it is returned untranslated. On output, '\n' is converted to the
    newline.

    If line_buffering is True, a call to flush is implied when a call to
    write contains a newline character.
    """

    # Number of bytes requested from the underlying buffer per
    # _read_chunk() call.
    _CHUNK_SIZE = 2048

    # Initialize _buffer as soon as possible since it's used by __del__()
    # which calls close()
    _buffer = None

    # The write_through argument has no effect here since this
    # implementation always writes through. The argument is present only
    # so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
             line_buffering=False, write_through=False):
    """Wrap *buffer* (a BufferedIOBase) with a character/line interface.

    See the class docstring for the meaning of the parameters.
    """
    self._check_newline(newline)
    # Encoding selection order: the device encoding of the underlying
    # fd, then the locale's preferred encoding, finally "ascii" when
    # the locale module itself is unavailable (e.g. during a build).
    if encoding is None:
        try:
            encoding = os.device_encoding(buffer.fileno())
        except (AttributeError, UnsupportedOperation):
            pass

    if encoding is None:
        try:
            import locale
        except ImportError:
            # Importing locale may fail if Python is being built
            encoding = "ascii"
        else:
            encoding = locale.getpreferredencoding(False)

    if not isinstance(encoding, str):
        raise ValueError("invalid encoding: %r" % encoding)

    # Reject bytes-to-bytes codecs such as "base64" up front.
    if not codecs.lookup(encoding)._is_text_encoding:
        msg = ("%r is not a text encoding; "
               "use codecs.open() to handle arbitrary codecs")
        raise LookupError(msg % encoding)

    if errors is None:
        errors = "strict"
    else:
        if not isinstance(errors, str):
            raise ValueError("invalid errors: %r" % errors)

    self._buffer = buffer
    self._decoded_chars = ''  # buffer for text returned from decoder
    self._decoded_chars_used = 0  # offset into _decoded_chars for read()
    self._snapshot = None  # info for reconstructing decoder state
    self._seekable = self._telling = self.buffer.seekable()
    self._has_read1 = hasattr(self.buffer, 'read1')
    self._configure(encoding, errors, newline,
                    line_buffering, write_through)
def _check_newline(self, newline):
    """Validate a *newline* argument, raising on illegal values."""
    if newline is None:
        # None (universal newlines) is always acceptable.
        return
    if not isinstance(newline, str):
        raise TypeError("illegal newline type: %r" % (type(newline),))
    if newline not in ("", "\n", "\r", "\r\n"):
        raise ValueError("illegal newline value: %r" % (newline,))
def _configure(self, encoding=None, errors=None, newline=None,
               line_buffering=False, write_through=False):
    """(Re)initialise codec and newline state; used by __init__/reconfigure."""
    self._encoding = encoding
    self._errors = errors
    self._encoder = None   # created lazily by _get_encoder()
    self._decoder = None   # created lazily by _get_decoder()
    self._b2cratio = 0.0   # bytes-per-char ratio of the last decoded chunk

    self._readuniversal = not newline        # '' or None: accept any ending
    self._readtranslate = newline is None    # None additionally maps to '\n'
    self._readnl = newline
    self._writetranslate = newline != ''
    self._writenl = newline or os.linesep

    self._line_buffering = line_buffering
    self._write_through = write_through

    # don't write a BOM in the middle of a file
    if self._seekable and self.writable():
        position = self.buffer.tell()
        if position != 0:
            try:
                self._get_encoder().setstate(0)
            except LookupError:
                # Sometimes the encoder doesn't exist
                pass

# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point.  We use this to reconstruct decoder states in tell().

# Naming convention:
#   - "bytes_..." for integer variables that count input bytes
#   - "chars_..." for integer variables that count decoded characters
def __repr__(self):
    """Return '<module.Class [name=...] [mode=...] encoding=...>'."""
    result = "<{}.{}".format(self.__class__.__module__,
                             self.__class__.__qualname__)
    # name/mode are forwarded from the underlying buffer and may be
    # missing there; include each only when the lookup succeeds.
    for attr in ("name", "mode"):
        try:
            value = getattr(self, attr)
        except AttributeError:
            continue
        result += " {}={!r}".format(attr, value)
    return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
    # Name of the text encoding in use (read-only).
    return self._encoding

@property
def errors(self):
    # Codec error-handling scheme, e.g. 'strict' (read-only).
    return self._errors

@property
def line_buffering(self):
    # True when write() flushes on newline/carriage return.
    return self._line_buffering

@property
def write_through(self):
    # Kept for C-implementation signature parity; this implementation
    # always writes through regardless of the value.
    return self._write_through

@property
def buffer(self):
    # The underlying binary buffer object.
    return self._buffer
def reconfigure(self, *,
                encoding=None, errors=None, newline=Ellipsis,
                line_buffering=None, write_through=None):
    """Reconfigure the text stream with new parameters.

    This also flushes the stream.
    """
    # Once decoding has started, changing codec/newline settings would
    # desynchronise the decoder state, so it is forbidden.
    if (self._decoder is not None
            and (encoding is not None or errors is not None
                 or newline is not Ellipsis)):
        raise UnsupportedOperation(
            "It is not possible to set the encoding or newline of stream "
            "after the first read")

    if errors is None:
        if encoding is None:
            errors = self._errors
        else:
            # A new encoding resets the error handler to strict.
            errors = 'strict'
    elif not isinstance(errors, str):
        raise TypeError("invalid errors: %r" % errors)

    if encoding is None:
        encoding = self._encoding
    else:
        if not isinstance(encoding, str):
            raise TypeError("invalid encoding: %r" % encoding)

    # Ellipsis is the "not given" sentinel because None is a meaningful
    # newline value (universal newlines).
    if newline is Ellipsis:
        newline = self._readnl
    self._check_newline(newline)

    if line_buffering is None:
        line_buffering = self.line_buffering
    if write_through is None:
        write_through = self.write_through

    self.flush()
    self._configure(encoding, errors, newline,
                    line_buffering, write_through)
def seekable(self):
    """Whether the underlying stream supports random access."""
    if self.closed:
        raise ValueError("I/O operation on closed file.")
    return self._seekable

def readable(self):
    # Delegated to the underlying binary buffer.
    return self.buffer.readable()

def writable(self):
    # Delegated to the underlying binary buffer.
    return self.buffer.writable()

def flush(self):
    self.buffer.flush()
    # Flushing re-enables tell(), which __next__() disables while
    # iterating (see __next__).
    self._telling = self._seekable
def close(self):
    """Flush and close the stream (idempotent)."""
    if self.buffer is not None and not self.closed:
        try:
            self.flush()
        finally:
            # Close the buffer even if flushing raised.
            self.buffer.close()

@property
def closed(self):
    # Mirrors the underlying buffer's closed state.
    return self.buffer.closed

@property
def name(self):
    # Forwarded from the underlying buffer; may raise AttributeError.
    return self.buffer.name

def fileno(self):
    return self.buffer.fileno()

def isatty(self):
    return self.buffer.isatty()
def write(self, s):
    'Write data, where s is a str'
    if self.closed:
        raise ValueError("write to closed file")
    if not isinstance(s, str):
        raise TypeError("can't write %s to text stream" %
                        s.__class__.__name__)
    length = len(s)
    # haslf: output may need newline translation and/or line-buffered flush.
    haslf = (self._writetranslate or self._line_buffering) and "\n" in s
    if haslf and self._writetranslate and self._writenl != "\n":
        s = s.replace("\n", self._writenl)
    encoder = self._encoder or self._get_encoder()
    # XXX What if we were just reading?
    b = encoder.encode(s)
    self.buffer.write(b)
    if self._line_buffering and (haslf or "\r" in s):
        self.flush()
    # Writing invalidates buffered decoded text and the tell() snapshot.
    self._set_decoded_chars('')
    self._snapshot = None
    if self._decoder:
        self._decoder.reset()
    return length
def _get_encoder(self):
    # Lazily create (and cache) the incremental encoder for self._encoding.
    make_encoder = codecs.getincrementalencoder(self._encoding)
    self._encoder = make_encoder(self._errors)
    return self._encoder

def _get_decoder(self):
    # Lazily create (and cache) the incremental decoder, wrapped in an
    # IncrementalNewlineDecoder when universal-newline reading is enabled.
    make_decoder = codecs.getincrementaldecoder(self._encoding)
    decoder = make_decoder(self._errors)
    if self._readuniversal:
        decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
    self._decoder = decoder
    return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
    """Set the _decoded_chars buffer."""
    self._decoded_chars = chars
    self._decoded_chars_used = 0

def _get_decoded_chars(self, n=None):
    """Advance into the _decoded_chars buffer.

    Return up to *n* buffered characters (all remaining when n is None).
    """
    offset = self._decoded_chars_used
    if n is None:
        chars = self._decoded_chars[offset:]
    else:
        chars = self._decoded_chars[offset:offset + n]
    self._decoded_chars_used += len(chars)
    return chars

def _rewind_decoded_chars(self, n):
    """Rewind the _decoded_chars buffer."""
    if self._decoded_chars_used < n:
        raise AssertionError("rewind decoded_chars out of bounds")
    self._decoded_chars_used -= n
def _read_chunk(self):
    """
    Read and decode the next chunk of data from the BufferedReader.
    """
    # The return value is True unless EOF was reached.  The decoded
    # string is placed in self._decoded_chars (replacing its previous
    # value).  The entire input chunk is sent to the decoder, though
    # some of it may remain buffered in the decoder, yet to be
    # converted.

    if self._decoder is None:
        raise ValueError("no decoder")

    if self._telling:
        # To prepare for tell(), we need to snapshot a point in the
        # file where the decoder's input buffer is empty.
        dec_buffer, dec_flags = self._decoder.getstate()
        # Given this, we know there was a valid snapshot point
        # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).

    # Read a chunk, decode it, and put the result in self._decoded_chars.
    if self._has_read1:
        input_chunk = self.buffer.read1(self._CHUNK_SIZE)
    else:
        input_chunk = self.buffer.read(self._CHUNK_SIZE)
    eof = not input_chunk
    decoded_chars = self._decoder.decode(input_chunk, eof)
    self._set_decoded_chars(decoded_chars)
    if decoded_chars:
        # Bytes-per-character ratio of this chunk; tell() uses it as a
        # search heuristic.
        self._b2cratio = len(input_chunk) / len(self._decoded_chars)
    else:
        self._b2cratio = 0.0

    if self._telling:
        # At the snapshot point, len(dec_buffer) bytes before the read,
        # the next input to be decoded is dec_buffer + input_chunk.
        self._snapshot = (dec_flags, dec_buffer + input_chunk)

    return not eof
def _pack_cookie(self, position, dec_flags=0,
                 bytes_to_feed=0, need_eof=0, chars_to_skip=0):
    # The meaning of a tell() cookie is: seek to position, set the
    # decoder flags to dec_flags, read bytes_to_feed bytes, feed them
    # into the decoder with need_eof as the EOF flag, then skip
    # chars_to_skip characters of the decoded result.  For most simple
    # decoders, tell() will often just give a byte offset in the file.
    # Each field occupies its own 64-bit slot of one big integer.
    return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
            (chars_to_skip<<192) | bool(need_eof)<<256)

def _unpack_cookie(self, bigint):
    # Inverse of _pack_cookie: peel off 64-bit fields, low to high.
    rest, position = divmod(bigint, 1<<64)
    rest, dec_flags = divmod(rest, 1<<64)
    rest, bytes_to_feed = divmod(rest, 1<<64)
    need_eof, chars_to_skip = divmod(rest, 1<<64)
    return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
def tell(self):
    """Return an opaque cookie encoding the current stream position.

    The cookie packs a safe byte position plus the decoder replay
    information needed by seek() (see _pack_cookie).
    """
    if not self._seekable:
        raise UnsupportedOperation("underlying stream is not seekable")
    if not self._telling:
        raise OSError("telling position disabled by next() call")
    self.flush()
    position = self.buffer.tell()
    decoder = self._decoder
    if decoder is None or self._snapshot is None:
        if self._decoded_chars:
            # This should never happen.
            raise AssertionError("pending decoded text")
        return position

    # Skip backward to the snapshot point (see _read_chunk).
    dec_flags, next_input = self._snapshot
    position -= len(next_input)

    # How many decoded characters have been used up since the snapshot?
    chars_to_skip = self._decoded_chars_used
    if chars_to_skip == 0:
        # We haven't moved from the snapshot point.
        return self._pack_cookie(position, dec_flags)

    # Starting from the snapshot position, we will walk the decoder
    # forward until it gives us enough decoded characters.
    saved_state = decoder.getstate()
    try:
        # Fast search for an acceptable start point, close to our
        # current pos.
        # Rationale: calling decoder.decode() has a large overhead
        # regardless of chunk size; we want the number of such calls to
        # be O(1) in most situations (common decoders, sensible input).
        # Actually, it will be exactly 1 for fixed-size codecs (all
        # 8-bit codecs, also UTF-16 and UTF-32).
        skip_bytes = int(self._b2cratio * chars_to_skip)
        skip_back = 1
        assert skip_bytes <= len(next_input)
        while skip_bytes > 0:
            decoder.setstate((b'', dec_flags))
            # Decode up to temptative start point
            n = len(decoder.decode(next_input[:skip_bytes]))
            if n <= chars_to_skip:
                b, d = decoder.getstate()
                if not b:
                    # Before pos and no bytes buffered in decoder => OK
                    dec_flags = d
                    chars_to_skip -= n
                    break
                # Skip back by buffered amount and reset heuristic
                skip_bytes -= len(b)
                skip_back = 1
            else:
                # We're too far ahead, skip back a bit
                skip_bytes -= skip_back
                skip_back = skip_back * 2
        else:
            skip_bytes = 0
            decoder.setstate((b'', dec_flags))

        # Note our initial start point.
        start_pos = position + skip_bytes
        start_flags = dec_flags
        if chars_to_skip == 0:
            # We haven't moved from the start point.
            return self._pack_cookie(start_pos, start_flags)

        # Feed the decoder one byte at a time.  As we go, note the
        # nearest "safe start point" before the current location
        # (a point where the decoder has nothing buffered, so seek()
        # can safely start from there and advance to this location).
        bytes_fed = 0
        need_eof = 0
        # Chars decoded since `start_pos`
        chars_decoded = 0
        for i in range(skip_bytes, len(next_input)):
            bytes_fed += 1
            chars_decoded += len(decoder.decode(next_input[i:i+1]))
            dec_buffer, dec_flags = decoder.getstate()
            if not dec_buffer and chars_decoded <= chars_to_skip:
                # Decoder buffer is empty, so this is a safe start point.
                start_pos += bytes_fed
                chars_to_skip -= chars_decoded
                start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
            if chars_decoded >= chars_to_skip:
                break
        else:
            # We didn't get enough decoded data; signal EOF to get more.
            chars_decoded += len(decoder.decode(b'', final=True))
            need_eof = 1
            if chars_decoded < chars_to_skip:
                raise OSError("can't reconstruct logical file position")

        # The returned cookie corresponds to the last safe start point.
        return self._pack_cookie(
            start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
    finally:
        # Always restore the real decoder state: this walk was speculative.
        decoder.setstate(saved_state)
def truncate(self, pos=None):
    """Truncate to *pos* (default: the current position)."""
    # Flush pending encoded data first so the size reflects it.
    self.flush()
    if pos is None:
        pos = self.tell()
    return self.buffer.truncate(pos)

def detach(self):
    """Separate and return the underlying buffer, disabling this wrapper."""
    if self.buffer is None:
        raise ValueError("buffer is already detached")
    self.flush()
    buffer = self._buffer
    self._buffer = None
    return buffer
def seek(self, cookie, whence=0):
    """Seek to a position; *cookie* is normally a value from tell()."""

    def _reset_encoder(position):
        """Reset the encoder (merely useful for proper BOM handling)"""
        try:
            encoder = self._encoder or self._get_encoder()
        except LookupError:
            # Sometimes the encoder doesn't exist
            pass
        else:
            if position != 0:
                # setstate(0) suppresses a BOM mid-file.
                encoder.setstate(0)
            else:
                encoder.reset()

    if self.closed:
        raise ValueError("tell on closed file")
    if not self._seekable:
        raise UnsupportedOperation("underlying stream is not seekable")
    if whence == SEEK_CUR:
        if cookie != 0:
            raise UnsupportedOperation("can't do nonzero cur-relative seeks")
        # Seeking to the current position should attempt to
        # sync the underlying buffer with the current position.
        whence = 0
        cookie = self.tell()
    elif whence == SEEK_END:
        if cookie != 0:
            raise UnsupportedOperation("can't do nonzero end-relative seeks")
        self.flush()
        position = self.buffer.seek(0, whence)
        self._set_decoded_chars('')
        self._snapshot = None
        if self._decoder:
            self._decoder.reset()
        _reset_encoder(position)
        return position
    if whence != 0:
        raise ValueError("unsupported whence (%r)" % (whence,))
    if cookie < 0:
        raise ValueError("negative seek position %r" % (cookie,))
    self.flush()

    # The strategy of seek() is to go back to the safe start point
    # and replay the effect of read(chars_to_skip) from there.
    start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
        self._unpack_cookie(cookie)

    # Seek back to the safe start point.
    self.buffer.seek(start_pos)
    self._set_decoded_chars('')
    self._snapshot = None

    # Restore the decoder to its state from the safe start point.
    if cookie == 0 and self._decoder:
        self._decoder.reset()
    elif self._decoder or dec_flags or chars_to_skip:
        self._decoder = self._decoder or self._get_decoder()
        self._decoder.setstate((b'', dec_flags))
        self._snapshot = (dec_flags, b'')

    if chars_to_skip:
        # Just like _read_chunk, feed the decoder and save a snapshot.
        input_chunk = self.buffer.read(bytes_to_feed)
        self._set_decoded_chars(
            self._decoder.decode(input_chunk, need_eof))
        self._snapshot = (dec_flags, input_chunk)

        # Skip chars_to_skip of the decoded characters.
        if len(self._decoded_chars) < chars_to_skip:
            raise OSError("can't restore logical file position")
        self._decoded_chars_used = chars_to_skip

    _reset_encoder(cookie)
    return cookie
def read(self, size=None):
    """Read and return up to *size* characters (everything when
    size is None or negative)."""
    self._checkReadable()
    if size is None:
        size = -1
    else:
        # Accept any integer-like object via __index__.
        try:
            size_index = size.__index__
        except AttributeError:
            raise TypeError(f"{size!r} is not an integer")
        else:
            size = size_index()
    decoder = self._decoder or self._get_decoder()
    if size < 0:
        # Read everything.
        result = (self._get_decoded_chars() +
                  decoder.decode(self.buffer.read(), final=True))
        self._set_decoded_chars('')
        self._snapshot = None
        return result
    else:
        # Keep reading chunks until we have size characters to return.
        eof = False
        result = self._get_decoded_chars(size)
        while len(result) < size and not eof:
            eof = not self._read_chunk()
            result += self._get_decoded_chars(size - len(result))
        return result
def __next__(self):
    """Iterator protocol: return the next line; raise StopIteration at EOF."""
    # Disable tell() during iteration: the read-ahead below makes its
    # bookkeeping invalid until the next flush()/EOF.
    self._telling = False
    line = self.readline()
    if not line:
        self._snapshot = None
        self._telling = self._seekable
        raise StopIteration
    return line

def readline(self, size=None):
    """Read and return one line (at most *size* characters when size >= 0)."""
    if self.closed:
        raise ValueError("read from closed file")
    if size is None:
        size = -1
    else:
        # Accept any integer-like object via __index__.
        try:
            size_index = size.__index__
        except AttributeError:
            raise TypeError(f"{size!r} is not an integer")
        else:
            size = size_index()

    # Grab all the decoded text (we will rewind any extra bits later).
    line = self._get_decoded_chars()

    start = 0
    # Make the decoder if it doesn't already exist.
    if not self._decoder:
        self._get_decoder()

    pos = endpos = None
    while True:
        if self._readtranslate:
            # Newlines are already translated, only search for \n
            pos = line.find('\n', start)
            if pos >= 0:
                endpos = pos + 1
                break
            else:
                start = len(line)

        elif self._readuniversal:
            # Universal newline search. Find any of \r, \r\n, \n
            # The decoder ensures that \r\n are not split in two pieces
            # In C we'd look for these in parallel of course.
            nlpos = line.find("\n", start)
            crpos = line.find("\r", start)
            if crpos == -1:
                if nlpos == -1:
                    # Nothing found
                    start = len(line)
                else:
                    # Found \n
                    endpos = nlpos + 1
                    break
            elif nlpos == -1:
                # Found lone \r
                endpos = crpos + 1
                break
            elif nlpos < crpos:
                # Found \n
                endpos = nlpos + 1
                break
            elif nlpos == crpos + 1:
                # Found \r\n
                endpos = crpos + 2
                break
            else:
                # Found \r
                endpos = crpos + 1
                break
        else:
            # non-universal
            pos = line.find(self._readnl)
            if pos >= 0:
                endpos = pos + len(self._readnl)
                break

        if size >= 0 and len(line) >= size:
            endpos = size  # reached length size
            break

        # No line ending seen yet - get more data
        while self._read_chunk():
            if self._decoded_chars:
                break
        if self._decoded_chars:
            line += self._get_decoded_chars()
        else:
            # end of file
            self._set_decoded_chars('')
            self._snapshot = None
            return line

    if size >= 0 and endpos > size:
        endpos = size  # don't exceed size

    # Rewind _decoded_chars to just after the line ending we found.
    self._rewind_decoded_chars(len(line) - endpos)
    return line[:endpos]

@property
def newlines(self):
    # Delegate to the newline decoder, which records endings seen so far.
    return self._decoder.newlines if self._decoder else None
class StringIO(TextIOWrapper):
    """Text I/O implementation using an in-memory buffer.

    The initial_value argument sets the value of object.  The newline
    argument is like the one of TextIOWrapper's constructor.
    """

    def __init__(self, initial_value="", newline="\n"):
        # Backed by a BytesIO holding UTF-8; "surrogatepass" keeps lone
        # surrogates round-trippable through the byte buffer.
        super(StringIO, self).__init__(BytesIO(),
                                       encoding="utf-8",
                                       errors="surrogatepass",
                                       newline=newline)
        # Issue #5645: make universal newlines semantics the same as in the
        # C version, even under Windows.
        if newline is None:
            self._writetranslate = False
        if initial_value is not None:
            if not isinstance(initial_value, str):
                raise TypeError("initial_value must be str or None, not {0}"
                                .format(type(initial_value).__name__))
            self.write(initial_value)
            self.seek(0)

    def getvalue(self):
        """Return the entire buffer contents as a str."""
        self.flush()
        decoder = self._decoder or self._get_decoder()
        old_state = decoder.getstate()
        decoder.reset()
        try:
            return decoder.decode(self.buffer.getvalue(), final=True)
        finally:
            # Restore the decoder so in-progress reads can continue.
            decoder.setstate(old_state)

    def __repr__(self):
        # TextIOWrapper tells the encoding in its repr. In StringIO,
        # that's an implementation detail.
        return object.__repr__(self)

    @property
    def errors(self):
        # The utf-8/surrogatepass backing codec is an implementation detail.
        return None

    @property
    def encoding(self):
        # Hidden for the same reason as errors.
        return None

    def detach(self):
        # This doesn't make sense on StringIO.
        self._unsupported("detach")
| apache-2.0 |
Gabriel439/pants | migrations/0.0.24/src/python/publish_migration.py | 17 | 1194 | #!/usr/bin/python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import sys
# Command line: publish_migration.py <publish.properties> <output dir>.
if len(sys.argv) != 3:
    print("Usage: publish_migration.py <publish.properties> <directory to write new files>")
    # Use sys.exit: the bare exit() helper is injected by the site module
    # and is not guaranteed to exist when scripts run non-interactively.
    sys.exit(1)

filename = sys.argv[1]      # input publish.properties file
new_base_dir = sys.argv[2]  # root directory for the migrated layout
def extract_artifact(line):
    """Parse a '%'-separated publish.properties line into (org, name).

    The key before '%' looks like 'revision.<field>.<org>' and the part
    after '%' looks like '<name>=<value>'.
    """
    fields = line.split('%')
    # Drop the 'revision.<field>.' prefix to leave just the org.
    org = re.sub(r'^revision\.[a-z_]+\.', '', fields[0])
    # Drop '=<value>' (and the trailing newline) to leave just the name.
    name = re.sub(r'=.*', '', fields[1].rstrip())
    return (org, name)
# Read every revision line from the legacy publish.properties file.
with open(filename) as f:
    content = f.readlines()

for line in content:
    # For each line get the org and name, make a directory with these
    # and open the publish file.
    artifact = extract_artifact(line)
    (org, name) = artifact

    publish_dir = os.path.join(new_base_dir, org, name)
    if not os.path.exists(publish_dir):
        os.makedirs(publish_dir)
    # Append, so multiple revision lines for one artifact accumulate in
    # the same per-artifact publish.properties file.
    with open(os.path.join(publish_dir, 'publish.properties'), 'a') as output:
        output.write(line)
| apache-2.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/wide_n_deep_tutorial.py | 18 | 8111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
# Column names of the Census Income ("adult") CSV files; the final
# column, income_bracket, is the label.
CSV_COLUMNS = [
    "age", "workclass", "fnlwgt", "education", "education_num",
    "marital_status", "occupation", "relationship", "race", "gender",
    "capital_gain", "capital_loss", "hours_per_week", "native_country",
    "income_bracket"
]

# Categorical base columns with small, known vocabularies.
gender = tf.feature_column.categorical_column_with_vocabulary_list(
    "gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
    "education", [
        "Bachelors", "HS-grad", "11th", "Masters", "9th",
        "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
        "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
        "Preschool", "12th"
    ])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
    "marital_status", [
        "Married-civ-spouse", "Divorced", "Married-spouse-absent",
        "Never-married", "Separated", "Married-AF-spouse", "Widowed"
    ])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
    "relationship", [
        "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
        "Other-relative"
    ])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
    "workclass", [
        "Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
        "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
    ])

# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
    "occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
    "native_country", hash_bucket_size=1000)

# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")

# Transformations.
age_buckets = tf.feature_column.bucketized_column(
    age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

# Wide columns and deep columns.
base_columns = [
    gender, education, marital_status, relationship, workclass, occupation,
    native_country, age_buckets,
]

# Crossed columns let the linear ("wide") part memorize feature
# co-occurrences.
crossed_columns = [
    tf.feature_column.crossed_column(
        ["education", "occupation"], hash_bucket_size=1000),
    tf.feature_column.crossed_column(
        [age_buckets, "education", "occupation"], hash_bucket_size=1000),
    tf.feature_column.crossed_column(
        ["native_country", "occupation"], hash_bucket_size=1000)
]

# Dense inputs for the DNN ("deep") part.
deep_columns = [
    tf.feature_column.indicator_column(workclass),
    tf.feature_column.indicator_column(education),
    tf.feature_column.indicator_column(gender),
    tf.feature_column.indicator_column(relationship),
    # To show an example of embedding
    tf.feature_column.embedding_column(native_country, dimension=8),
    tf.feature_column.embedding_column(occupation, dimension=8),
    age,
    education_num,
    capital_gain,
    capital_loss,
    hours_per_week,
]
def _fetch_if_missing(path, url, label):
    """Return *path* if given; otherwise download *url* to a temp file.

    Args:
      path: user-supplied local file path, or "" to trigger a download.
      url: source URL for the dataset file.
      label: human-readable name used in the progress message.

    Returns:
      The path of a local file containing the data.
    """
    if path:
        return path
    tmp = tempfile.NamedTemporaryFile(delete=False)
    # Close before downloading: urlretrieve reopens the path for writing,
    # which fails on Windows while the NamedTemporaryFile is still open.
    tmp.close()
    urllib.request.urlretrieve(url, tmp.name)
    print("%s data is downloaded to %s" % (label, tmp.name))
    return tmp.name


def maybe_download(train_data, test_data):
    """Maybe downloads training data and returns train and test file names."""
    train_file_name = _fetch_if_missing(
        train_data,
        "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
        "Training")
    test_file_name = _fetch_if_missing(
        test_data,
        "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
        "Test")
    return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
    """Build an estimator of the requested kind.

    'wide' selects a linear model, 'deep' a DNN; any other value falls
    back to the combined wide-and-deep classifier.
    """
    if model_type == "wide":
        return tf.estimator.LinearClassifier(
            model_dir=model_dir, feature_columns=base_columns + crossed_columns)
    if model_type == "deep":
        return tf.estimator.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=[100, 50])
    # Default: linear part on the crosses, DNN part on the dense columns.
    return tf.estimator.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=crossed_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=[100, 50])
def input_fn(data_file, num_epochs, shuffle):
    """Return a tf.estimator input_fn that reads the CSV at *data_file*."""
    frame = pd.read_csv(
        tf.gfile.Open(data_file),
        names=CSV_COLUMNS,
        skipinitialspace=True,
        engine="python",
        skiprows=1)
    # Drop any row containing a missing value.
    frame = frame.dropna(how="any", axis=0)
    # Binary label: 1 when the bracket string mentions ">50K".
    labels = frame["income_bracket"].apply(lambda v: ">50K" in v).astype(int)
    return tf.estimator.inputs.pandas_input_fn(
        x=frame,
        y=labels,
        batch_size=100,
        num_epochs=num_epochs,
        shuffle=shuffle,
        num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
    """Train and evaluate the model."""
    train_file_name, test_file_name = maybe_download(train_data, test_data)
    # Specify file path below if want to find the output easily
    model_dir = tempfile.mkdtemp() if not model_dir else model_dir

    m = build_estimator(model_dir, model_type)
    # set num_epochs to None to get infinite stream of data.
    m.train(
        input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
        steps=train_steps)
    # set steps to None to run evaluation until all data consumed.
    results = m.evaluate(
        input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
        steps=None)
    print("model directory = %s" % model_dir)
    # Print every evaluation metric (accuracy, loss, AUC, ...).
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))

    # Manual cleanup
    shutil.rmtree(model_dir)
# Populated in __main__ with the parsed command-line arguments.
FLAGS = None


def main(_):
    """Entry point for tf.app.run(): train and evaluate using FLAGS."""
    train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
                   FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Allow "--flag=true"/"--flag=false" style boolean parsing.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument(
        "--model_dir",
        type=str,
        default="",
        help="Base directory for output models."
    )
    parser.add_argument(
        "--model_type",
        type=str,
        default="wide_n_deep",
        help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
    )
    parser.add_argument(
        "--train_steps",
        type=int,
        default=2000,
        help="Number of training steps."
    )
    parser.add_argument(
        "--train_data",
        type=str,
        default="",
        help="Path to the training data."
    )
    parser.add_argument(
        "--test_data",
        type=str,
        default="",
        help="Path to the test data."
    )
    FLAGS, unparsed = parser.parse_known_args()
    # Forward any unparsed arguments to TensorFlow's app runner.
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.