repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Jimdo/ansible-modules-core | commands/command.py | 37 | 9569 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys
import datetime
import traceback
import re
import shlex
import os
DOCUMENTATION = '''
---
module: command
version_added: historical
short_description: Executes a command on a remote node
description:
- The M(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell)
module if you need these features).
options:
free_form:
description:
- the command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
default: null
aliases: []
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
version_added: "0.6"
required: false
default: null
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
version_added: "1.8"
default: yes
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(shell) module instead. The
M(command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author: Michael DeHaan
'''
EXAMPLES = '''
# Example from Ansible Playbooks.
- command: /sbin/shutdown -t now
# Run the command if the specified file does not exist.
- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# /path/to/database doesn't exist.
- command: /usr/bin/make_database.sh arg1 arg2
args:
chdir: somedir/
creates: /path/to/database
'''
# Dict of options and their defaults.
# These are the key=value parameters that may be embedded in the free-form
# command line (e.g. "creates=/path cmd arg") and are stripped out before
# the command is executed.
OPTIONS = {'chdir': None,
           'creates': None,
           'executable': None,
           # NOTE(review): NO_LOG is never read in this file — presumably
           # consumed by the Ansible runner layer; confirm before removing.
           'NO_LOG': None,
           'removes': None,
           'warn': True,
           }

# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
# ^ look for a space or the beginning of the line
# 2. ({options_list})=
# ^ expanded to (chdir|creates|executable...)=
# look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later
# 4. (.*?)
# ^ match everything in a non-greedy manner until...
# 5. (?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)
# ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of
# the line is reached
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(
    r'(^|\s)(' + OPTIONS_REGEX +
    r')=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'
)
def check_command(commandline):
    """Suggest dedicated Ansible modules for well-known shell commands.

    Inspects the executable named at the start of *commandline* and returns a
    list of human-readable warning strings (empty when no suggestion applies).
    """
    # Commands better expressed as the 'file' module with a specific argument.
    file_module_args = {
        'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
        'ln': 'state=link', 'mkdir': 'state=directory',
        'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch',
    }
    # Commands with a dedicated module of their own.
    module_suggestions = {
        'git': 'git', 'hg': 'hg', 'curl': 'get_url', 'wget': 'get_url',
        'svn': 'subversion', 'service': 'service',
        'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get',
        'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
        'rsync': 'synchronize',
    }

    # Only the bare executable name matters, not its path or arguments.
    executable = os.path.basename(commandline.split()[0])

    findings = []
    if executable in file_module_args:
        findings.append("Consider using file module with %s rather than running %s"
                        % (file_module_args[executable], executable))
    if executable in module_suggestions:
        findings.append("Consider using %s module rather than running %s"
                        % (module_suggestions[executable], executable))
    return findings
def main():
    """Entry point for the Ansible 'command' module.

    Parses the free-form command plus the special key=value options
    (chdir/creates/removes/executable/warn), honors the idempotence guards,
    runs the command, and exits via module.exit_json/fail_json.
    """
    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = CommandModule(argument_spec=dict())

    # Special options were already extracted from the raw arg string by
    # CommandModule._load_params; 'args' is what remains of the command line.
    shell = module.params['shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    args = module.params['args']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    # chdir was validated to exist in _load_params; switch the whole process.
    if chdir:
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not os.path.exists(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    # Optionally warn when a dedicated module would be a better fit.
    warnings = list()
    if warn:
        warnings = check_command(args)

    # Without a shell, run_command needs a pre-split argv list.
    if not shell:
        args = shlex.split(args)
    startd = datetime.datetime.now()

    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)

    endd = datetime.datetime.now()
    delta = endd - startd

    # run_command may return None streams; normalize for .rstrip below.
    if out is None:
        out = ''
    if err is None:
        err = ''

    module.exit_json(
        cmd      = args,
        stdout   = out.rstrip("\r\n"),
        stderr   = err.rstrip("\r\n"),
        rc       = rc,
        start    = str(startd),
        end      = str(endd),
        delta    = str(delta),
        changed  = True,
        warnings = warnings
    )
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
# only the command module should ever need to do this
# everything else should be simple key=value
class CommandModule(AnsibleModule):
    """AnsibleModule subclass that accepts a free-form command line.

    Unlike every other module, the command module's input is not key=value
    pairs, so argument alias handling and invalid-argument checking are
    disabled and parameter loading is done by scanning the raw arg string.
    """

    def _handle_aliases(self):
        # Free-form input has no named parameters, hence no aliases.
        return {}

    def _check_invalid_arguments(self):
        # Anything is valid in a free-form command line; skip validation.
        pass

    def _load_params(self):
        ''' read the input and return a dictionary and the arguments string '''
        # MODULE_ARGS is injected by the Ansible module snippet machinery.
        args = MODULE_ARGS
        params = copy.copy(OPTIONS)
        params['shell'] = False
        # The #USE_SHELL marker is how the 'shell' module reuses this code.
        if "#USE_SHELL" in args:
            args = args.replace("#USE_SHELL", "")
            params['shell'] = True

        items = split_args(args)

        for x in items:
            # A fully quoted token is part of the command itself, not an option.
            quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
            if '=' in x and not quoted:
                # check to see if this is a special parameter for the command
                k, v = x.split('=', 1)
                v = unquote(v.strip())
                if k in OPTIONS.keys():
                    if k == "chdir":
                        # Validate the target directory up front so failures
                        # are reported before anything is executed.
                        v = os.path.abspath(os.path.expanduser(v))
                        if not (os.path.exists(v) and os.path.isdir(v)):
                            self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v)
                    elif k == "executable":
                        v = os.path.abspath(os.path.expanduser(v))
                        if not (os.path.exists(v)):
                            self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v)
                    params[k] = v
        # Remove any of the above k=v params from the args string
        args = PARAM_REGEX.sub('', args)
        params['args'] = args.strip()
        return (params, params['args'])
main()
| gpl-3.0 |
robweber/maraschino | lib/sqlalchemy/dialects/mssql/__init__.py | 23 | 1112 | # mssql/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \
pymssql, zxjdbc, mxodbc
base.dialect = pyodbc.dialect
from sqlalchemy.dialects.mssql.base import \
INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \
NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\
DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \
BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\
MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
) | mit |
xtenex/raft | extras/RaftCaptureProcessor.py | 10 | 10007 | #
# A Python 3 urllib compatible processor module to generate RAFT capture files
#
# Copyright (c) 2011-2013 by RAFT Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
#
import base64
import http.client
import io
import lzma
import os
import re
import string
import sys
import threading
import time
import urllib.request
import urllib.error
import urllib.parse

from urllib import parse as urlparse
from xml.sax.saxutils import escape, quoteattr
class RaftCaptureProcessor(urllib.request.BaseHandler):
    """urllib.request handler that records every HTTP(S) exchange to a RAFT
    capture file (XML wrapped in an .xz container).

    Install it in an opener via urllib.request.build_opener(); each response
    is transparently wrapped so callers can still read the body, while the
    request/response pair is appended to the current capture file.
    """

    class _wrapper(io.BytesIO):
        """Response proxy: buffers the (already consumed) body so callers can
        still .read() it, and delegates every other attribute to the real
        response object."""

        def __init__(self, parent, request, response):
            # Fix: the original assigned the local name to itself
            # ("request = request"), so the attribute was never stored and
            # wrapper.request fell through __getattr__ to the response object.
            self.request = request
            self.response = response
            # write_capture consumes the body and returns it for replay.
            data = parent.write_capture(request, response)
            io.BytesIO.__init__(self, data)

        def __getattr__(self, name):
            # Only reached for attributes not defined on the wrapper itself.
            return getattr(self.response, name)

    def __init__(self, directory, cut_count=10000):
        """Create a capture writer storing files under *directory*; a new
        file is started every *cut_count* captures."""
        # Serializes writes: multiple opener threads may share this handler.
        self.lock = threading.Lock()
        self.directory = directory
        # Bytes/str patterns matching anything outside the printable ASCII
        # set; used to decide between literal and base64-encoded storage.
        self.re_nonprintable = re.compile(bytes('[^%s]' % re.escape('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r'), 'ascii'))
        self.re_nonprintable_str = re.compile('[^%s]' % re.escape('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r'))
        self.cut_count = cut_count  # TODO: add max size as well
        self.open_file()

    def open_file(self):
        """Open a fresh, timestamp-named capture file and write the root
        element opening tag."""
        now = time.time()
        self.filename = os.path.join(self.directory, 'RaftCapture-{0}.xml.xz'.format(int(now * 1000)))
        self.ofhandle = lzma.LZMAFile(self.filename, 'wb')
        self.ofhandle.write(b'<raft version="1.0">\n')
        self.write_count = 0

    def close(self):
        """Close the root element and the underlying compressed file."""
        self.ofhandle.write(b'</raft>')
        self.ofhandle.close()

    # urllib.request handler protocol: pass requests through untouched and
    # wrap responses so they get captured.
    def http_request(self, req):
        return req

    def http_response(self, req, response):
        return RaftCaptureProcessor._wrapper(self, req, response)

    def https_request(self, req):
        return req

    def https_response(self, req, response):
        return RaftCaptureProcessor._wrapper(self, req, response)

    def write_capture(self, request, response):
        """Thread-safe wrapper around __write_capture; returns the response
        body bytes."""
        acquired = False
        try:
            acquired = self.lock.acquire()
            return self.__write_capture(request, response)
        finally:
            if acquired:
                self.lock.release()

    def __write_capture(self, request, response):
        """Serialize one request/response pair as a <capture> element and
        append it to the current file. Returns the response body."""
        ohandle = io.StringIO()
        response_body = b''
        saved_exception = None
        try:
            ohandle.write('<capture>\n')
            ohandle.write('<request>\n')
            method = request.get_method()
            url = request.get_full_url()
            parsed = urlparse.urlsplit(url)
            relative_url = parsed.path
            if parsed.query:
                relative_url += '?' + parsed.query
            if parsed.fragment:
                # TODO: will this ever happen?
                relative_url += '#' + parsed.fragment

            host = None
            request_body = None
            if hasattr(request, 'get_host'):
                # support 3.3 (older Request API)
                host = request.get_host()
                if request.has_data():
                    request_body = request.get_data()
            else:
                host = request.host
                request_body = request.data

            ohandle.write('<method>%s</method>\n' % escape(method))
            ohandle.write('<url>%s</url>\n' % escape(url))
            ohandle.write('<host>%s</host>\n' % escape(host))
            try:
                # ghetto: reach into the raw socket for the peer address;
                # best-effort only, any failure is ignored.
                addr = response.fp.raw._sock.getpeername()
                if addr:
                    ohandle.write('<hostip>%s</hostip>\n' % escape(addr[0]))
            except Exception as error:
                pass
            ohandle.write('<datetime>%s</datetime>\n' % escape(time.asctime(time.gmtime()) + ' GMT'))  # TODO: can we calculate request time and elapsed?
            request_headers = '%s %s HTTP/1.1\r\n' % (method, relative_url)  # TODO: is there access to the HTTP version?
            for item in request.header_items():
                request_headers += item[0] + ': ' + '\r\n\t'.join(item[1:]) + '\r\n'
            # Headers/bodies with non-printable characters are stored base64.
            if self.re_nonprintable_str.search(request_headers):
                ohandle.write('<headers encoding="base64">%s</headers>\n' % base64.b64encode(request_headers.encode('utf-8')).decode('ascii'))
            else:
                ohandle.write('<headers>%s</headers>\n' % escape(request_headers))
            if request_body is not None:
                if self.re_nonprintable.search(request_body):
                    ohandle.write('<body encoding="base64">%s</body>\n' % base64.b64encode(request_body).decode('ascii'))
                else:
                    ohandle.write('<body>%s</body>\n' % escape(request_body.decode('ascii')))
            ohandle.write('</request>\n')

            ohandle.write('<response>\n')
            status = int(response.getcode())
            ohandle.write('<status>%d</status>\n' % status)
            headers = response.info()
            # HEAD, 1xx, 204 and 304 responses carry no body by definition.
            if 'HEAD' == method or status < 200 or status in (204, 304,):
                response_body = b''
            else:
                try:
                    response_body = response.read()
                except http.client.IncompleteRead as e:
                    # Fix: the original caught urllib2.IncompleteRead, but
                    # urllib2 does not exist in Python 3 — the except clause
                    # itself raised NameError. Remember the error and re-raise
                    # after the partial capture has been written.
                    saved_exception = e
            response_headers = 'HTTP/1.1 %d %s\r\n' % (status, response.msg)  # TODO: is there access to the HTTP version?
            response_headers += headers.as_string()
            content_type = headers.get('Content-Type')
            content_length = headers.get('Content-Length')
            if content_type:
                ohandle.write('<content_type>%s</content_type>\n' % escape(content_type))
            if content_length:
                ohandle.write('<content_length>%d</content_length>\n' % int(content_length))
            if self.re_nonprintable_str.search(response_headers):
                ohandle.write('<headers encoding="base64">%s</headers>\n' % base64.b64encode(response_headers.encode('utf-8')).decode('ascii'))
            else:
                ohandle.write('<headers>%s</headers>\n' % escape(response_headers))
            if response_body:
                if self.re_nonprintable.search(response_body):
                    ohandle.write('<body encoding="base64">%s</body>\n' % base64.b64encode(response_body).decode('ascii'))
                else:
                    ohandle.write('<body>%s</body>\n' % escape(response_body.decode('ascii')))
            ohandle.write('</response>\n')
            ohandle.write('</capture>\n')

            self.ofhandle.write(ohandle.getvalue().encode('utf-8'))
            ohandle.close()
            # Roll over to a new capture file every cut_count writes.
            self.write_count += 1
            if 0 == (self.write_count % self.cut_count):
                self.close()
                self.open_file()
        except Exception as e:
            # Never let a capture failure break the actual HTTP exchange.
            sys.stderr.write('*** unhandled error in RaftCaptureProcessor: %s\n' % (e))

        if saved_exception:
            raise(saved_exception)

        return response_body
class IgnoreRedirect(urllib.request.HTTPRedirectHandler):
    """Redirect handler that never follows redirects.

    For every redirect status it simply hands back the original response
    object, so the caller sees the 3xx response itself instead of the
    redirect target.
    """

    def _return_unfollowed(self, req, fp, code, msg, hdrs):
        # Returning the response suppresses urllib's automatic re-request.
        return fp

    # Same pass-through behavior for every redirect status code.
    http_error_301 = _return_unfollowed
    http_error_302 = _return_unfollowed
    http_error_303 = _return_unfollowed
    http_error_307 = _return_unfollowed
if '__main__' == __name__:
    # test and sample code: fetch a handful of hosts through an opener that
    # records every exchange into RaftCapture-*.xml.xz in the current dir.
    from contextlib import closing
    if len(sys.argv) == 1:
        # No input file given: use a single default target.
        targets = ['www.bing.com']
    else:
        # Read up to ~10 hostnames from the given file; lines may be
        # "rank,hostname" CSV rows (e.g. an Alexa top-sites list).
        count = 0
        targets = []
        for line in open(sys.argv[1], 'r'):
            hostname = line.rstrip()
            if ',' in hostname:
                hostname = hostname.split(',', 1)[1]
            targets.append(hostname)
            count += 1
            if count > 10:
                break

    # closing() guarantees the capture file's root element is terminated.
    with closing(RaftCaptureProcessor('.')) as raftCapture:
        # proxyHandler = urllib2.ProxyHandler({'http':'localhost:8080', 'https':'localhost:8080'})
        opener = urllib.request.build_opener(raftCapture, )
        for target in targets:
            url = 'http://' + target + '/'
            req = urllib.request.Request(url)
            req.add_header('User-agent', 'Mozilla/5.0 (Windows NT 5.1; rv:2.0) Gecko/20100101 Firefox/4.0')
            try:
                response = opener.open(req, timeout=5)
            except urllib.error.HTTPError as error:
                # HTTP error responses are still captured; treat as response.
                response = error
            except urllib.error.URLError as error:
                sys.stdout.write('failed on %s: %s\n' % (url, error))
                sys.stdout.flush()
                response = None
            # Debug output, deliberately disabled by the 'False and' guard.
            if False and response:
                print(('%d %s' % (response.getcode(), response.msg)))
                print((''.join(response.headers._headers)))
                print((response.read()))
| gpl-3.0 |
AstroPrint/AstroBox | src/astroprint/camera/v4l2/gstreamer/process/pipelines/bins/h264_video_enc.py | 1 | 2199 | # coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import logging
from gi.repository import Gst
from .base_video_enc import VideoEncBinBase
class H264VideoEncBin(VideoEncBinBase):
    """GStreamer encoder bin producing RTP-payloaded H.264 video.

    Builds the chain [videocrop ->] omxh264enc -> capsfilter -> rtph264pay
    inside the bin provided by VideoEncBinBase.
    """

    def __init__(self, size, rotation):
        super(H264VideoEncBin, self).__init__(size, rotation)
        self._logger = logging.getLogger(__name__)

    def _constructEncChain(self):
        """Create and link the encoding elements; return (first, last)
        elements of the chain for the base class to hook up."""
        # Hardware (OpenMAX) H.264 encoder.
        self.__encoderElement = Gst.ElementFactory.make('omxh264enc', 'h264_encoder')

        # Capabilities for the H.264 output.
        self.__encoderCaps = Gst.ElementFactory.make("capsfilter", "encoder_filter")
        self.__encoderCaps.set_property("caps", Gst.Caps.from_string('video/x-h264,profile=high'))

        # RTP payloader so the H.264 stream can be shared in UDP packets.
        self.__rtpElement = Gst.ElementFactory.make('rtph264pay', 'h264_rtp')
        self.__rtpElement.set_property('pt', 96)
        # Re-send SPS/PPS every second so late joiners can decode.
        self.__rtpElement.set_property('config-interval', 1)

        self._bin.add(self.__encoderElement)
        self._bin.add(self.__encoderCaps)
        self._bin.add(self.__rtpElement)

        # H.264 produced green/red band artifacts when the size is not
        # divisible by 16, so crop to the closest multiple of 16 if needed.
        first_element = None

        if self._rotation in [1, 3]:
            # Rotated 90/270 degrees: dimensions are flipped.
            height, width = self._size
        else:
            width, height = self._size

        modulo_w = width % 16
        modulo_h = height % 16

        if modulo_w > 0 or modulo_h > 0:
            self.__cropElement = Gst.ElementFactory.make('videocrop', 'videocrop')

            # Split the excess pixels between the two opposite edges.
            # NOTE(review): '/' is floor division only under Python 2; under
            # Python 3 these become floats — confirm the target runtime or
            # switch to '//'.
            if modulo_w > 0:
                half_w = modulo_w/2
                self.__cropElement.set_property('left', half_w)
                self.__cropElement.set_property('right', modulo_w - half_w)

            if modulo_h > 0:
                half_h = modulo_h/2
                self.__cropElement.set_property('top', half_h)
                self.__cropElement.set_property('bottom', modulo_h - half_h)

            self._bin.add(self.__cropElement)
            self.__cropElement.link(self.__encoderElement)
            first_element = self.__cropElement
        else:
            first_element = self.__encoderElement

        self.__encoderElement.link(self.__encoderCaps)
        self.__encoderCaps.link(self.__rtpElement)

        return first_element, self.__rtpElement

    def _getUdpPort(self):
        # Fixed UDP port used for the H.264 RTP stream.
        return 8004
| agpl-3.0 |
panyang/Wikipedia_Word2vec | v2/train_word2vec_with_gensim.py | 1 | 2027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Pan Yang (panyangnlp@gmail.com)
# Copyright 2017 @ Yu Zhen
import gensim
import logging
import multiprocessing
import os
import re
import sys
from pattern.en import tokenize
from time import time
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
def cleanhtml(raw_html):
    """Replace every HTML/XML tag in *raw_html* with a single space.

    Uses a non-greedy match so adjacent tags are handled one at a time;
    text between tags is preserved unchanged.
    """
    return re.sub('<.*?>', ' ', raw_html)
class MySentences(object):
    """Restartable iterable of tokenized sentences for gensim Word2Vec.

    Walks every file under *dirname* and yields one list of lowercase,
    purely-alphabetic tokens per non-empty line, so the corpus is streamed
    rather than loaded into memory. gensim iterates the corpus multiple
    times, hence a class with __iter__ instead of a one-shot generator.
    """

    def __init__(self, dirname):
        # Root directory of the corpus to walk.
        self.dirname = dirname

    def __iter__(self):
        for root, dirs, files in os.walk(self.dirname):
            for filename in files:
                file_path = root + '/' + filename
                # Fix: use a context manager so the file handle is closed
                # deterministically; the original left closing to the GC.
                with open(file_path) as corpus_file:
                    for line in corpus_file:
                        sline = line.strip()
                        if sline == "":
                            continue
                        # Strip HTML tags, then sentence/word tokenize.
                        rline = cleanhtml(sline)
                        tokenized_line = ' '.join(tokenize(rline))
                        # Keep only purely alphabetic tokens, lowercased.
                        is_alpha_word_line = [word for word in
                                              tokenized_line.lower().split()
                                              if word.isalpha()]
                        yield is_alpha_word_line
if __name__ == '__main__':
    # NOTE: Python 2 script (print statements below); expects exactly one
    # argument: the directory containing the extracted corpus.
    if len(sys.argv) != 2:
        print "Please use python train_with_gensim.py data_path"
        exit()
    data_path = sys.argv[1]
    begin = time()

    # Stream the corpus and train Word2Vec with one worker per CPU core.
    sentences = MySentences(data_path)
    model = gensim.models.Word2Vec(sentences,
                                   size=200,       # embedding dimensionality
                                   window=10,      # context window size
                                   min_count=10,   # drop rare words
                                   workers=multiprocessing.cpu_count())
    # Save both the full gensim model and the plain word2vec text format.
    model.save("data/model/word2vec_gensim")
    model.wv.save_word2vec_format("data/model/word2vec_org",
                                  "data/model/vocabulary",
                                  binary=False)
    end = time()
    # NOTE(review): "procesing" typo lives in a runtime string, left as-is.
    print "Total procesing time: %d seconds" % (end - begin)
| mit |
agry/NGECore2 | scripts/mobiles/lok/elder_snorbal_male.py | 4 | 1710 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'elder_snorbal_male' creature spawn template with the
    game core's spawn service (Jython script run by the NGECore2 server)."""
    mobileTemplate = MobileTemplate()

    # Basic identity and difficulty.
    mobileTemplate.setCreatureName('elder_snorbal_male')
    mobileTemplate.setLevel(35)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)

    # Harvestable resources dropped by this creature.
    mobileTemplate.setMeatType("Herbivore Meat")
    mobileTemplate.setMeatAmount(145)
    mobileTemplate.setHideType("Leathery Hide")
    mobileTemplate.setHideAmount(135)
    mobileTemplate.setBoneType("Animal Bone")
    mobileTemplate.setBoneAmount(135)

    # Social/AI behavior.
    mobileTemplate.setSocialGroup("self")
    mobileTemplate.setAssistRange(8)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)

    # Client object template(s) for this creature's appearance.
    templates = Vector()
    templates.add('object/mobile/shared_elder_snorbal_male.iff')
    mobileTemplate.setTemplates(templates)

    # Default unarmed "weapon" used for melee attacks.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # Special attacks available to this creature's AI.
    attacks = Vector()
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    attacks.add('bm_bite_3')
    attacks.add('bm_charge_3')
    attacks.add('bm_dampen_pain_3')
    attacks.add('bm_shaken_1')
    attacks.add('bm_stomp_3')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('elder_snorbal_male', mobileTemplate)
    return
dmitry-r/incubator-airflow | airflow/ti_deps/deps/prev_dagrun_dep.py | 38 | 3341 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class PrevDagrunDep(BaseTIDep):
    """
    Is the past dagrun in a state that allows this task instance to run, e.g. did this
    task instance's task in the previous dagrun complete if we are depending on past.
    """
    NAME = "Previous Dagrun State"
    IGNOREABLE = True
    IS_TASK_DEP = True

    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        """Yield passing/failing dep statuses for task instance *ti* based on
        its task's depends_on_past / wait_for_downstream settings."""
        # Caller may explicitly waive depends_on_past (e.g. backfill flags).
        if dep_context.ignore_depends_on_past:
            yield self._passing_status(
                reason="The context specified that the state of past DAGs could be "
                       "ignored.")
            return

        if not ti.task.depends_on_past:
            yield self._passing_status(
                reason="The task did not have depends_on_past set.")
            return

        # Don't depend on the previous task instance if we are the first task
        dag = ti.task.dag
        if dag.catchup:
            # With catchup, "first" is determined from the schedule.
            if dag.previous_schedule(ti.execution_date) is None:
                yield self._passing_status(
                    reason="This task does not have a schedule or is @once"
                )
                return
            if dag.previous_schedule(ti.execution_date) < ti.task.start_date:
                yield self._passing_status(
                    reason="This task instance was the first task instance for its task.")
                return
        else:
            # Without catchup, "first" means no earlier dagrun exists.
            dr = ti.get_dagrun()
            last_dagrun = dr.get_previous_dagrun() if dr else None

            if not last_dagrun:
                yield self._passing_status(
                    reason="This task instance was the first task instance for its task.")
                return

        previous_ti = ti.previous_ti
        if not previous_ti:
            yield self._failing_status(
                reason="depends_on_past is true for this task's DAG, but the previous "
                       "task instance has not run yet.")
            return

        # Only skipped or successful previous runs satisfy depends_on_past.
        if previous_ti.state not in {State.SKIPPED, State.SUCCESS}:
            yield self._failing_status(
                reason="depends_on_past is true for this task, but the previous task "
                       "instance {0} is in the state '{1}' which is not a successful "
                       "state.".format(previous_ti, previous_ti.state))

        # are_dependents_done needs the task object attached to the previous TI.
        previous_ti.task = ti.task
        if (ti.task.wait_for_downstream and
                not previous_ti.are_dependents_done(session=session)):
            yield self._failing_status(
                reason="The tasks downstream of the previous task instance {0} haven't "
                       "completed.".format(previous_ti))
| apache-2.0 |
idwanglu2010/git-repo | subcmds/selfupdate.py | 82 | 1931 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import SUPPRESS_HELP
import sys
from command import Command, MirrorSafeCommand
from subcmds.sync import _PostRepoUpgrade
from subcmds.sync import _PostRepoFetch
class Selfupdate(Command, MirrorSafeCommand):
    """Subcommand that upgrades the repo tool itself to the latest version.

    NOTE: Python 2 source (uses 'print >>' syntax below).
    """
    common = False
    helpSummary = "Update repo to the latest version"
    helpUsage = """
%prog
"""
    helpDescription = """
The '%prog' command upgrades repo to the latest version, if a
newer version is available.

Normally this is done automatically by 'repo sync' and does not
need to be performed by an end-user.
"""

    def _Options(self, p):
        # Register command-line flags for this subcommand.
        g = p.add_option_group('repo Version options')
        g.add_option('--no-repo-verify',
                     dest='no_repo_verify', action='store_true',
                     help='do not verify repo source code')
        # Internal flag set when re-invoked after an upgrade; hidden from help.
        g.add_option('--repo-upgraded',
                     dest='repo_upgraded', action='store_true',
                     help=SUPPRESS_HELP)

    def Execute(self, opt, args):
        rp = self.manifest.repoProject
        rp.PreSync()

        if opt.repo_upgraded:
            # Second pass: the fetch already happened, finish the upgrade.
            _PostRepoUpgrade(self.manifest)

        else:
            # First pass: fetch the latest repo source.
            if not rp.Sync_NetworkHalf():
                print >>sys.stderr, "error: can't update repo"
                sys.exit(1)

            rp.bare_git.gc('--auto')
            _PostRepoFetch(rp,
                           no_repo_verify = opt.no_repo_verify,
                           verbose = True)
| apache-2.0 |
artwr/airflow | airflow/contrib/operators/qubole_operator.py | 1 | 10014 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.qubole_hook import QuboleHook, COMMAND_ARGS, HYPHEN_ARGS, \
flatten_list, POSITIONAL_ARGS
class QuboleOperator(BaseOperator):
"""
Execute tasks (commands) on QDS (https://qubole.com).
:param qubole_conn_id: Connection id which consists of qds auth_token
:type qubole_conn_id: str
kwargs:
:command_type: type of command to be executed, e.g. hivecmd, shellcmd, hadoopcmd
:tags: array of tags to be assigned with the command
:cluster_label: cluster label on which the command will be executed
:name: name to be given to command
:notify: whether to send email on command completion or not (default is False)
**Arguments specific to command types**
hivecmd:
:query: inline query statement
:script_location: s3 location containing query statement
:sample_size: size of sample in bytes on which to run query
:macros: macro values which were used in query
:sample_size: size of sample in bytes on which to run query
:hive-version: Specifies the hive version to be used. eg: 0.13,1.2,etc.
prestocmd:
:query: inline query statement
:script_location: s3 location containing query statement
:macros: macro values which were used in query
hadoopcmd:
:sub_commnad: must be one these ["jar", "s3distcp", "streaming"] followed by
1 or more args
shellcmd:
:script: inline command with args
:script_location: s3 location containing query statement
:files: list of files in s3 bucket as file1,file2 format. These files will be
copied into the working directory where the qubole command is being
executed.
:archives: list of archives in s3 bucket as archive1,archive2 format. These
will be unarchived into the working directory where the qubole command is
being executed
:parameters: any extra args which need to be passed to script (only when
script_location is supplied)
pigcmd:
:script: inline query statement (latin_statements)
:script_location: s3 location containing pig query
:parameters: any extra args which need to be passed to script (only when
script_location is supplied
sparkcmd:
:program: the complete Spark Program in Scala, SQL, Command, R, or Python
:cmdline: spark-submit command line, all required information must be specify
in cmdline itself.
:sql: inline sql query
:script_location: s3 location containing query statement
:language: language of the program, Scala, SQL, Command, R, or Python
:app_id: ID of an Spark job server app
:arguments: spark-submit command line arguments
:user_program_arguments: arguments that the user program takes in
:macros: macro values which were used in query
:note_id: Id of the Notebook to run
dbtapquerycmd:
:db_tap_id: data store ID of the target database, in Qubole.
:query: inline query statement
:macros: macro values which were used in query
dbexportcmd:
:mode: Can be 1 for Hive export or 2 for HDFS/S3 export
:schema: Db schema name assumed accordingly by database if not specified
:hive_table: Name of the hive table
:partition_spec: partition specification for Hive table.
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:db_update_mode: allowinsert or updateonly
:db_update_keys: columns used to determine the uniqueness of rows
:export_dir: HDFS/S3 location from which data will be exported.
:fields_terminated_by: hex of the char used as column separator in the dataset
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
:additional_options: Additional Sqoop options which are needed enclose options in
double or single quotes e.g. '--map-column-hive id=int,data=string'
dbimportcmd:
:mode: 1 (simple), 2 (advance)
:hive_table: Name of the hive table
:schema: Db schema name assumed accordingly by database if not specified
:hive_serde: Output format of the Hive Table
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:where_clause: where clause, if any
:parallelism: number of parallel db connections to use for extracting data
:extract_query: SQL query to extract data from db. $CONDITIONS must be part
of the where clause.
:boundary_query: Query to be used get range of row IDs to be extracted
:split_column: Column used as row ID to split data into ranges (mode 2)
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
:additional_options: Additional Sqoop options which are needed enclose options in
double or single quotes
.. note:
Following fields are template-supported : ``query``, ``script_location``,
``sub_command``, ``script``, ``files``, ``archives``, ``program``, ``cmdline``,
``sql``, ``where_clause``, ``extract_query``, ``boundary_query``, ``macros``,
``tags``, ``name``, ``parameters``, ``dbtap_id``, ``hive_table``, ``db_table``,
``split_column``, ``note_id``, ``db_update_keys``, ``export_dir``,
``partition_spec``, ``qubole_conn_id``, ``arguments``, ``user_program_arguments``.
You can also use ``.txt`` files for template driven use cases.
.. note:
In QuboleOperator there is a default handler for task failures and retries,
which generally kills the command running at QDS for the corresponding task
instance. You can override this behavior by providing your own failure and retry
handler in task definition.
"""
template_fields = ('query', 'script_location', 'sub_command', 'script', 'files',
'archives', 'program', 'cmdline', 'sql', 'where_clause', 'tags',
'extract_query', 'boundary_query', 'macros', 'name', 'parameters',
'dbtap_id', 'hive_table', 'db_table', 'split_column', 'note_id',
'db_update_keys', 'export_dir', 'partition_spec', 'qubole_conn_id',
'arguments', 'user_program_arguments', 'cluster_label')
template_ext = ('.txt',)
ui_color = '#3064A1'
ui_fgcolor = '#fff'
qubole_hook_allowed_args_list = ['command_type', 'qubole_conn_id', 'fetch_logs']
    @apply_defaults
    def __init__(self, qubole_conn_id="qubole_default", *args, **kwargs):
        """Keep the raw args for QuboleHook and initialize the base operator.

        All kwargs are stored verbatim for later hook construction; only the
        kwargs not consumed by Qubole commands are forwarded to the Airflow
        base operator's __init__.
        """
        self.args = args
        self.kwargs = kwargs
        self.kwargs['qubole_conn_id'] = qubole_conn_id
        self.hook = None
        filtered_base_kwargs = self._get_filtered_args(kwargs)
        super(QuboleOperator, self).__init__(*args, **filtered_base_kwargs)
        # Default failure/retry handlers kill the in-flight QDS command
        # (overridable per task, as noted in the class docstring).
        if self.on_failure_callback is None:
            self.on_failure_callback = QuboleHook.handle_failure_retry
        if self.on_retry_callback is None:
            self.on_retry_callback = QuboleHook.handle_failure_retry
def _get_filtered_args(self, all_kwargs):
qubole_args = flatten_list(COMMAND_ARGS.values()) + HYPHEN_ARGS + \
flatten_list(POSITIONAL_ARGS.values()) + self.qubole_hook_allowed_args_list
return {key: value for key, value in all_kwargs.items() if key not in qubole_args}
def execute(self, context):
return self.get_hook().execute(context)
def on_kill(self, ti=None):
if self.hook:
self.hook.kill(ti)
else:
self.get_hook().kill(ti)
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
return self.get_hook().get_results(ti, fp, inline, delim, fetch)
def get_log(self, ti):
return self.get_hook().get_log(ti)
def get_jobs_id(self, ti):
return self.get_hook().get_jobs_id(ti)
def get_hook(self):
# Reinitiating the hook, as some template fields might have changed
return QuboleHook(*self.args, **self.kwargs)
    def __getattribute__(self, name):
        """Serve template fields from ``self.kwargs``; everything else normally.

        Reading ``self.kwargs`` below re-enters this method, but 'kwargs' is
        not a template field, so that inner lookup falls through to
        ``object.__getattribute__`` and the recursion terminates.
        """
        if name in QuboleOperator.template_fields:
            if name in self.kwargs:
                return self.kwargs[name]
            else:
                # Unset template fields render as empty strings.
                return ''
        else:
            return object.__getattribute__(self, name)
    def __setattr__(self, name, value):
        """Mirror of __getattribute__: template fields are stored in self.kwargs."""
        if name in QuboleOperator.template_fields:
            self.kwargs[name] = value
        else:
            object.__setattr__(self, name, value)
| apache-2.0 |
laravelproject2016/project | vendor/guzzle/guzzle/docs/conf.py | 469 | 3047 | import sys, os
# Register PHP lexers so highlighted code blocks work without a <?php opener.
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
# Un-annotated directives default to the PHP domain.
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['localtoc.html', 'leftbar.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
# NOTE(review): hard-coded absolute path to a developer's machine; the import
# below only succeeds elsewhere when guzzle_sphinx_theme is already on sys.path.
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
    "index_template": "index.html",
    "project_nav_name": "Guzzle",
    "github_user": "guzzle",
    "github_repo": "guzzle",
    "disqus_comments_shortname": "guzzle",
    "google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Guzzle.tex', u'Guzzle Documentation',
     u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'guzzle', u'Guzzle Documentation',
     [u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Guzzle', u'Guzzle Documentation',
     u'Michael Dowling', 'Guzzle', 'One line description of project.',
     'Miscellaneous'),
]
| mit |
SwordGO/SwordGO_app | example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/_cmd.py | 488 | 1320 | import logging
from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.cache import DictCache
from pip._vendor.cachecontrol.controller import logger
from argparse import ArgumentParser
def setup_logging():
    """Enable DEBUG-level cache-controller logging on stderr."""
    stream_handler = logging.StreamHandler()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
def get_session():
    """Build a requests session whose HTTP adapters cache via CacheControl."""
    cache_adapter = CacheControlAdapter(
        DictCache(),
        cache_etags=True,
        serializer=None,
        heuristic=None,
    )
    session = requests.Session()
    for prefix in ('http://', 'https://'):
        session.mount(prefix, cache_adapter)
    # Expose the controller so callers can poke the cache directly.
    session.cache_controller = cache_adapter.controller
    return session
def get_args():
    """Parse the single positional ``url`` argument from sys.argv."""
    cli = ArgumentParser()
    cli.add_argument('url', help='The URL to try and cache')
    return cli.parse_args()
def main(args=None):
    """Fetch a URL, cache the response, then verify it is served from cache."""
    args = get_args()
    session = get_session()
    # Fetch the resource first so the cache has something to store.
    response = session.get(args.url)
    # Enable logging only now, so the log shows cache activity alone.
    setup_logging()
    # Store the live response, then check a cached copy satisfies the request.
    session.cache_controller.cache_response(response.request, response.raw)
    if session.cache_controller.cached_request(response.request):
        print('Cached!')
    else:
        print('Not cached :(')


if __name__ == '__main__':
    main()
| gpl-3.0 |
aequitas/home-assistant | homeassistant/components/konnected/handlers.py | 8 | 2240 | """Handle Konnected messages."""
import logging
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import decorator
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_HUMIDITY)
from .const import (CONF_INVERSE, SIGNAL_SENSOR_UPDATE, SIGNAL_DS18B20_NEW)
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
@HANDLERS.register('state')
async def async_handle_state_update(hass, context, msg):
    """Handle a binary sensor state update."""
    _LOGGER.debug("[state handler] context: %s msg: %s", context, msg)
    raw = bool(int(msg.get(ATTR_STATE)))
    # Invert the reported state when the zone is configured as inverse.
    state = not raw if context.get(CONF_INVERSE) else raw
    signal = SIGNAL_SENSOR_UPDATE.format(context.get(ATTR_ENTITY_ID))
    async_dispatcher_send(hass, signal, state)
@HANDLERS.register('temp')
async def async_handle_temp_update(hass, context, msg):
    """Handle a temperature sensor state update."""
    _LOGGER.debug("[temp handler] context: %s msg: %s", context, msg)
    entity_id, temp = context.get(DEVICE_CLASS_TEMPERATURE), msg.get('temp')
    # Only dispatch when a temperature entity is registered for this device.
    if entity_id:
        async_dispatcher_send(
            hass, SIGNAL_SENSOR_UPDATE.format(entity_id), temp)
@HANDLERS.register('humi')
async def async_handle_humi_update(hass, context, msg):
    """Handle a humidity sensor state update."""
    _LOGGER.debug("[humi handler] context: %s msg: %s", context, msg)
    humidity = msg.get('humi')
    target = context.get(DEVICE_CLASS_HUMIDITY)
    # Only dispatch when a humidity entity is registered for this device.
    if target:
        signal = SIGNAL_SENSOR_UPDATE.format(target)
        async_dispatcher_send(hass, signal, humidity)
@HANDLERS.register('addr')
async def async_handle_addr_update(hass, context, msg):
    """Handle an addressable sensor update."""
    _LOGGER.debug("[addr handler] context: %s msg: %s", context, msg)
    addr, temp = msg.get('addr'), msg.get('temp')
    entity_id = context.get(addr)
    if entity_id:
        # Known address: forward the reading to the existing entity.
        async_dispatcher_send(
            hass, SIGNAL_SENSOR_UPDATE.format(entity_id), temp)
    else:
        # Unknown address: announce a newly discovered DS18B20 sensor.
        msg['device_id'] = context.get('device_id')
        msg['temperature'] = temp
        msg['addr'] = addr
        async_dispatcher_send(hass, SIGNAL_DS18B20_NEW, msg)
| apache-2.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_funcptr.py | 53 | 3915 | import os, unittest
from ctypes import *
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class CFuncPtrTestCase(unittest.TestCase):
    """Exercise ctypes function-pointer types (CFUNCTYPE / WINFUNCTYPE)."""
    def test_basic(self):
        # A prototype records return/argument types; instances and the
        # type itself are both pointer-sized.
        X = WINFUNCTYPE(c_int, c_int, c_int)
        def func(*args):
            return len(args)
        x = X(func)
        self.assertEqual(x.restype, c_int)
        self.assertEqual(x.argtypes, (c_int, c_int))
        self.assertEqual(sizeof(x), sizeof(c_voidp))
        self.assertEqual(sizeof(X), sizeof(c_voidp))
    def test_first(self):
        StdCallback = WINFUNCTYPE(c_int, c_int, c_int)
        CdeclCallback = CFUNCTYPE(c_int, c_int, c_int)
        def func(a, b):
            return a + b
        s = StdCallback(func)
        c = CdeclCallback(func)
        self.assertEqual(s(1, 2), 3)
        self.assertEqual(c(1, 2), 3)
        # The following no longer raises a TypeError - it is now
        # possible, as in C, to call cdecl functions with more parameters.
        #self.assertRaises(TypeError, c, 1, 2, 3)
        self.assertEqual(c(1, 2, 3, 4, 5, 6), 3)
        # Real stdcall callbacks (Windows, non-CE) do check the arg count.
        if not WINFUNCTYPE is CFUNCTYPE and os.name != "ce":
            self.assertRaises(TypeError, s, 1, 2, 3)
    def test_structures(self):
        WNDPROC = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
        def wndproc(hwnd, msg, wParam, lParam):
            return hwnd + msg + wParam + lParam
        HINSTANCE = c_int
        HICON = c_int
        HCURSOR = c_int
        LPCTSTR = c_char_p
        class WNDCLASS(Structure):
            _fields_ = [("style", c_uint),
                        ("lpfnWndProc", WNDPROC),
                        ("cbClsExtra", c_int),
                        ("cbWndExtra", c_int),
                        ("hInstance", HINSTANCE),
                        ("hIcon", HICON),
                        ("hCursor", HCURSOR),
                        ("lpszMenuName", LPCTSTR),
                        ("lpszClassName", LPCTSTR)]
        wndclass = WNDCLASS()
        wndclass.lpfnWndProc = WNDPROC(wndproc)
        WNDPROC_2 = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
        # This is no longer true, now that WINFUNCTYPE caches created types internally.
        ## # CFuncPtr subclasses are compared by identity, so this raises a TypeError:
        ## self.assertRaises(TypeError, setattr, wndclass,
        ##                  "lpfnWndProc", WNDPROC_2(wndproc))
        # instead:
        self.assertTrue(WNDPROC is WNDPROC_2)
        # 'wndclass.lpfnWndProc' leaks 94 references. Why?
        self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10)
        # The callback must stay callable after its sources are deleted.
        f = wndclass.lpfnWndProc
        del wndclass
        del wndproc
        self.assertEqual(f(10, 11, 12, 13), 46)
    def test_dllfunctions(self):
        def NoNullHandle(value):
            if not value:
                raise WinError()
            return value
        strchr = lib.my_strchr
        strchr.restype = c_char_p
        strchr.argtypes = (c_char_p, c_char)
        self.assertEqual(strchr(b"abcdefghi", b"b"), b"bcdefghi")
        self.assertEqual(strchr(b"abcdefghi", b"x"), None)
        strtok = lib.my_strtok
        strtok.restype = c_char_p
        # Neither of this does work: strtok changes the buffer it is passed
        ## strtok.argtypes = (c_char_p, c_char_p)
        ## strtok.argtypes = (c_string, c_char_p)
        def c_string(init):
            size = len(init) + 1
            return (c_char*size)(*init)
        s = b"a\nb\nc"
        b = c_string(s)
        ## b = (c_char * (len(s)+1))()
        ## b.value = s
        ## b = c_string(s)
        self.assertEqual(strtok(b, b"\n"), b"a")
        self.assertEqual(strtok(None, b"\n"), b"b")
        self.assertEqual(strtok(None, b"\n"), b"c")
        self.assertEqual(strtok(None, b"\n"), None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
sparkmorry/flask | scripts/make-release.py | 11 | 4029 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
    """Extract (version, release_date, codename) from the CHANGES file.

    Scans for a ``Version X.Y.Z`` heading whose dashed underline matches the
    heading width, then parses the following
    ``released on <date>[, codename <name>]`` line. Returns None when no
    well-formed entry is found.

    NOTE(review): ``lineiter.next()`` and the trailing ``(?i)`` inline flag
    are Python 2 idioms, consistent with the rest of this script.
    """
    with open('CHANGES') as f:
        lineiter = iter(f)
        for line in lineiter:
            match = re.search('^Version\s+(.*)', line.strip())
            if match is None:
                continue
            length = len(match.group(1))  # currently unused
            version = match.group(1).strip()
            # The underline ('----') must be exactly as wide as the heading.
            if lineiter.next().count('-') != len(match.group(0)):
                continue
            # Skip blank lines until the release-info line.
            while 1:
                change_info = lineiter.next().strip()
                if change_info:
                    break
            match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
                              r'(?:, codename (.*))?(?i)', change_info)
            if match is None:
                continue
            datestr, codename = match.groups()
            return version, parse_date(datestr), codename
def bump_version(version):
    """Return *version* with its last numeric component incremented.

    Aborts via :func:`fail` when any component is not an integer.
    """
    try:
        # A real list (not a lazy map object) is required: the result is
        # indexed and mutated below, which breaks on Python 3's map().
        parts = [int(piece) for piece in version.split('.')]
    except ValueError:
        fail('Current version is not numeric')
    parts[-1] += 1
    return '.'.join(map(str, parts))
def parse_date(string):
    """Parse a date like ``March 1st 2011``, dropping ordinal suffixes first."""
    cleaned = _date_clean_re.sub(r'\1', string)
    return datetime.strptime(cleaned, '%B %d %Y')
def set_filename_version(filename, version_number, pattern):
    """Rewrite ``<pattern> = '...'`` in *filename* to hold *version_number*.

    Aborts via :func:`fail` when the pattern is not found.
    """
    changed = []
    def inject_version(match):
        before, old, after = match.groups()
        # Record that at least one substitution happened.
        changed.append(True)
        return before + version_number + after
    with open(filename) as f:
        # NOTE(review): the trailing (?sm) inline-flag group is accepted by
        # the Python 2 `re` this script targets; modern Python 3 requires
        # flags at the start of the pattern.
        contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
                          inject_version, f.read())
    if not changed:
        fail('Could not find %s in %s', pattern, filename)
    with open(filename, 'w') as f:
        f.write(contents)
def set_init_version(version):
    """Write *version* into flask/__init__.py's ``__version__``."""
    info('Setting __init__.py version to %s', version)
    set_filename_version('flask/__init__.py', version, '__version__')
def set_setup_version(version):
    """Write *version* into the ``version`` keyword of setup.py."""
    info('Setting setup.py version to %s', version)
    set_filename_version('setup.py', version, 'version')
def build_and_upload():
    """Build an sdist with the release alias and upload it to PyPI."""
    cmd = [sys.executable, 'setup.py', 'release', 'sdist', 'upload']
    Popen(cmd).wait()
def fail(message, *args):
    """Print a formatted error to stderr and abort with exit status 1."""
    # stderr.write keeps this portable across Python 2 and 3 while producing
    # exactly the same output as the former ``print >> sys.stderr`` statement.
    sys.stderr.write('Error: ' + (message % args) + '\n')
    sys.exit(1)
def info(message, *args):
    """Print a formatted progress message to stderr."""
    # stderr.write keeps this portable across Python 2 and 3 while producing
    # exactly the same output as the former ``print >> sys.stderr`` statement.
    sys.stderr.write((message % args) + '\n')
def get_git_tags():
    """Return the set of existing git tag names."""
    return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
    """Return True when the working tree has no uncommitted changes."""
    return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
    """Commit all tracked changes with the %-formatted *message*."""
    message = message % args
    Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
    """Create a git tag named *tag* on the current commit."""
    info('Tagging "%s"', tag)
    Popen(['git', 'tag', tag]).wait()
def main():
    """Cut a release: validate state, tag, build, upload, then bump to -dev."""
    os.chdir(os.path.join(os.path.dirname(__file__), '..'))
    rv = parse_changelog()
    if rv is None:
        fail('Could not parse changelog')
    version, release_date, codename = rv
    dev_version = bump_version(version) + '-dev'
    info('Releasing %s (codename %s, release date %s)',
         version, codename, release_date.strftime('%d/%m/%Y'))
    tags = get_git_tags()
    if version in tags:
        fail('Version "%s" is already tagged', version)
    if release_date.date() != date.today():
        # Bug fix: the format string previously had no arguments, so fail()
        # crashed with a TypeError ("not enough arguments for format string")
        # instead of printing the dates.
        fail('Release date is not today (%s != %s)',
             release_date.date(), date.today())
    if not git_is_clean():
        fail('You have uncommitted changes in git')
    set_init_version(version)
    set_setup_version(version)
    make_git_commit('Bump version number to %s', version)
    make_git_tag(version)
    build_and_upload()
    # Re-open development on the next version.
    set_init_version(dev_version)
    set_setup_version(dev_version)
if __name__ == '__main__':
    main()
| bsd-3-clause |
TetraEtc/limbo | limbo/plugins/google.py | 1 | 1101 | """!search <query> will return the top google result for that query (!google is an alias)"""
from bs4 import BeautifulSoup
import re
try:
from urllib import quote, unquote
except ImportError:
from urllib.request import quote, unquote
import requests
def google(q):
    """Return the URL of the top Google result for query *q*.

    Falls back to Google's own answer text when the first hit carries no
    link (e.g. an instant-answer card), or to an apology when there are no
    results at all.
    """
    query = quote(q)
    url = "https://encrypted.google.com/search?q={0}".format(query)
    soup = BeautifulSoup(requests.get(url).text, "html5lib")
    answer = soup.findAll("h3", attrs={"class": "r"})
    if not answer:
        return ":crying_cat_face: Sorry, google doesn't have an answer for you :crying_cat_face:"
    try:
        # Result links look like /url?q=<target>&...; pull out the target URL.
        return unquote(re.findall(r"q=(.*?)&", str(answer[0]))[0])
    except IndexError:
        # in this case there is a first answer without a link, which is a
        # google response! Let's grab it and display it to the user.
        return ' '.join(answer[0].stripped_strings)
def on_message(msg, server):
    """Respond to ``!search <q>`` / ``!google <q>`` with the top Google hit."""
    queries = re.findall(r"!(?:google|search) (.*)", msg.get("text", ""))
    if not queries:
        return None
    return google(queries[0])
on_bot_message = on_message
| mit |
aterrel/blaze | blaze/api/tests/test_into.py | 1 | 3436 | import unittest
from dynd import nd
import numpy as np
from datashape import dshape
from blaze.api.into import into, discover
import blaze
def skip(test_foo):
    """Decorator that discards the decorated test entirely."""
    return None


def skip_if_not(x):
    """Return a decorator that keeps the test only when *x* is truthy."""
    def maybe_a_test_function(test_foo):
        return test_foo if x else None
    return maybe_a_test_function
class Test_into(unittest.TestCase):
    """Round-trip ``into`` conversions between builtins, dynd, and numpy."""
    def test_containers(self):
        # list/tuple/dict conversions preserve element order and pairing.
        self.assertEqual(into([], (1, 2, 3)),
                         [1, 2, 3])
        self.assertEqual(into((), (1, 2, 3)),
                         (1, 2, 3))
        self.assertEqual(into({}, [(1, 2), (3, 4)]),
                         {1: 2, 3: 4})
        self.assertEqual(into((), {1: 2, 3: 4}),
                         ((1, 2), (3, 4)))
        self.assertEqual(into((), {'cat': 2, 'dog': 4}),
                         (('cat', 2), ('dog', 4)))
    def test_dynd(self):
        # dynd arrays compare via as_py since nd.array lacks simple equality.
        self.assertEqual(nd.as_py(into(nd.array(), (1, 2, 3))),
                         nd.as_py(nd.array([1, 2, 3])))
        self.assertEqual(into([], nd.array([1, 2])),
                         [1, 2])
        self.assertEqual(into([], nd.array([[1, 2], [3, 4]])),
                         [[1, 2], [3, 4]])
    def test_numpy(self):
        assert (into(np.array(0), [1, 2]) == np.array([1, 2])).all()
        self.assertEqual(into([], np.array([1, 2])),
                         [1, 2])
    def test_type(self):
        # Passing a *type* as the target behaves like passing an instance.
        self.assertEqual(into(list, (1, 2, 3)),
                         into([], (1, 2, 3)))
        self.assertEqual(str(into(np.ndarray, (1, 2, 3))),
                         str(into(np.ndarray(()), (1, 2, 3))))
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
try:
from blaze.data.python import Python
except ImportError:
Python = None
@skip_if_not(DataFrame and Python)
def test_pandas_data_descriptor():
    """A Python data descriptor converts into an equivalent DataFrame."""
    data = [['Alice', 100], ['Bob', 200]]
    schema = '{name: string, amount: int}'
    dd = Python(data, schema=schema)
    result = into(DataFrame, dd)
    expected = DataFrame(data, columns=['name', 'amount'])
    print(result)
    print(expected)
    assert str(result) == str(expected)


@skip_if_not(DataFrame and nd.array)
def test_pandas_dynd():
    """A dynd array converts into an equivalent DataFrame."""
    data = [['Alice', 100], ['Bob', 200]]
    schema = '{name: string, amount: int}'
    arr = nd.array(data, dtype=schema)
    result = into(DataFrame, arr)
    expected = DataFrame(data, columns=['name', 'amount'])
    print(result)
    print(expected)
    assert str(result) == str(expected)


@skip_if_not(DataFrame)
def test_pandas_seq():
    """Sequences convert into DataFrames; target columns are honored."""
    assert str(into(DataFrame, [1, 2])) == \
        str(DataFrame([1, 2]))
    assert str(into(DataFrame, (1, 2))) == \
        str(DataFrame([1, 2]))
    assert str(into(DataFrame(columns=['a', 'b']), [(1, 2), (3, 4)])) == \
        str(DataFrame([[1, 2], [3, 4]], columns=['a', 'b']))


@skip_if_not(DataFrame)
def test_discover_pandas():
    """discover() reports the element datashape of a DataFrame."""
    data = [['Alice', 100], ['Bob', 200]]
    df = DataFrame(data, columns=['name', 'balance'])
    print(discover(df))
    assert discover(df).subshape[0] == dshape('{name: string, balance: int64}')


# Bug fix: this function was also named ``test_discover_pandas``, which
# shadowed the test above so the discover test never ran. Renamed to
# describe what it actually checks: DataFrame -> dynd conversion.
@skip_if_not(DataFrame and nd.array)
def test_into_dynd_from_pandas():
    """A DataFrame converts into a dynd array of row tuples."""
    data = [('Alice', 100), ('Bob', 200)]
    df = DataFrame(data, columns=['name', 'balance'])
    result = into(nd.array, df)
    assert nd.as_py(result, tuple=True) == data
| bsd-3-clause |
BenLand100/rat-pac | python/SCons/Tool/masm.py | 19 | 2990 | """SCons.Tool.masm
Tool-specific initialization for the Microsoft Assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/masm.py 4043 2009/02/23 09:06:45 scons"
import SCons.Defaults
import SCons.Tool
import SCons.Util
# Plain assembly vs. assembly-needing-the-C-preprocessor source suffixes.
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
# '.S' means "preprocess first" only on case-sensitive filesystems; otherwise
# it is indistinguishable from '.s' and is treated as plain assembly.
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
    ASPPSuffixes.extend(['.S'])
else:
    ASSuffixes.extend(['.S'])
def generate(env):
    """Add Builders and construction variables for masm to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # Plain assembly sources go straight through the assembler.
    for suffix in ASSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.ASAction)
        shared_obj.add_action(suffix, SCons.Defaults.ASAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
    # Preprocessed-assembly sources are run through the C preprocessor first.
    for suffix in ASPPSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
        shared_obj.add_action(suffix, SCons.Defaults.ASPPAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
    env['AS'] = 'ml'
    env['ASFLAGS'] = SCons.Util.CLVar('/nologo')
    env['ASPPFLAGS'] = '$ASFLAGS'
    env['ASCOM'] = '$AS $ASFLAGS /c /Fo$TARGET $SOURCES'
    env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c /Fo$TARGET $SOURCES'
    # masm produces position-independent-agnostic objects, so static and
    # shared builds can share them.
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
    """Return a truthy value when the ``ml`` assembler can be detected."""
    return env.Detect('ml')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
yajiedesign/mxnet | python/mxnet/gluon/loss.py | 2 | 42891 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=arguments-differ
""" losses for training neural networks """
__all__ = ['Loss', 'L2Loss', 'L1Loss',
'SigmoidBinaryCrossEntropyLoss', 'SigmoidBCELoss',
'SoftmaxCrossEntropyLoss', 'SoftmaxCELoss',
'KLDivLoss', 'CTCLoss', 'HuberLoss', 'HingeLoss',
'SquaredHingeLoss', 'LogisticLoss', 'TripletLoss', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'SDMLLoss']
import numpy as np
from .. import ndarray
from ..base import numeric_types
from .block import HybridBlock
from ..util import is_np_array
def _apply_weighting(F, loss, weight=None, sample_weight=None):
    """Scale a loss tensor by optional per-sample and global weights.

    Parameters
    ----------
    loss : Symbol
        The unweighted loss.
    weight : float or None
        Global scalar multiplier applied to every element.
    sample_weight : Symbol or None
        Per-sample multiplier, broadcastable to ``loss``. For example, with
        a (64, 10)-shaped loss, a (64, 1) sample_weight weights each sample
        in the batch separately.

    Returns
    -------
    Symbol
        The weighted loss.
    """
    if sample_weight is not None:
        # numpy-style arrays broadcast with *; legacy symbols need the
        # explicit broadcast_mul operator.
        multiply = (lambda a, b: a * b) if is_np_array() else F.broadcast_mul
        loss = multiply(loss, sample_weight)
    if weight is not None:
        assert isinstance(weight, numeric_types), "weight must be a number"
        loss = loss * weight
    return loss
def _reshape_like(F, x, y):
    """Reshape ``x`` to the same shape as ``y``."""
    if F is ndarray:
        return x.reshape(y.shape)
    # Symbolic path: numpy-style ops live under the npx namespace.
    namespace = F.npx if is_np_array() else F
    return namespace.reshape_like(x, y)
def _batch_mean(F, loss, batch_axis):
    """Return mean on the specified batch axis, not keeping the axis"""
    if is_np_array():
        if F is ndarray:
            # Imperative numpy path: average over every axis except the batch.
            axes = list(range(loss.ndim))
            del axes[batch_axis]
            return F.np.mean(loss, axis=axes)
        else:
            # Symbolic numpy path: rank is unknown, so flatten to 2-D and
            # average axis 1; this only works when the batch axis is 0.
            assert batch_axis == 0, 'Currently, we have not supported the "exclude" ' \
                                    'flag in mean. So we only support batch_axis=0.'
            return F.npx.batch_flatten(loss).mean(axis=1)
    else:
        # Legacy API supports excluding the batch axis directly.
        return F.mean(loss, axis=batch_axis, exclude=True)
def _batch_sum(F, loss, batch_axis):
    """Return sum on the specified batch axis, not keeping the axis"""
    if is_np_array():
        if F is ndarray:
            # Imperative numpy path: sum over every axis except the batch.
            axes = list(range(loss.ndim))
            del axes[batch_axis]
            return F.np.sum(loss, axis=axes)
        else:
            # Symbolic numpy path: rank is unknown, so flatten to 2-D and
            # sum axis 1; this only works when the batch axis is 0.
            assert batch_axis == 0, 'Currently, we have not supported the "exclude" ' \
                                    'flag in mean. So we only support batch_axis=0.'
            return F.npx.batch_flatten(loss).sum(axis=1)
    else:
        # Legacy API supports excluding the batch axis directly.
        return F.sum(loss, axis=batch_axis, exclude=True)
class Loss(HybridBlock):
    """Base class for loss.
    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self, weight, batch_axis, **kwargs):
        super(Loss, self).__init__(**kwargs)
        # Stored for subclasses; applied in their hybrid_forward via
        # _apply_weighting and the _batch_* reducers.
        self._weight = weight
        self._batch_axis = batch_axis
    def __repr__(self):
        s = '{name}(batch_axis={_batch_axis}, w={_weight})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def hybrid_forward(self, F, x, *args, **kwargs):
        """Overrides to construct symbolic graph for this `Block`.
        Parameters
        ----------
        x : Symbol or NDArray
            The first input tensor.
        *args : list of Symbol or list of NDArray
            Additional input tensors.
        """
        # pylint: disable= invalid-name
        # Subclasses must implement the actual loss computation.
        raise NotImplementedError
class L2Loss(Loss):
    r"""Calculates the mean squared error between `label` and `pred`.
    .. math:: L = \frac{1}{2} \sum_i \vert {label}_i - {pred}_i \vert^2.
    `label` and `pred` can have arbitrary shape as long as they have the same
    number of elements.
    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **label**: target tensor with the same size as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, weight=1., batch_axis=0, **kwargs):
        super(L2Loss, self).__init__(weight, batch_axis, **kwargs)
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        # Pick the numpy-style or legacy square op to match the active API.
        square_fn = F.np.square if is_np_array() else F.square
        label = _reshape_like(F, label, pred)
        loss = square_fn(label - pred)
        # weight/2 yields the conventional 1/2 * squared-error scaling.
        loss = _apply_weighting(F, loss, self._weight / 2, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class L1Loss(Loss):
    r"""Calculates the mean absolute error between `label` and `pred`.
    .. math:: L = \sum_i \vert {label}_i - {pred}_i \vert.
    `label` and `pred` can have arbitrary shape as long as they have the same
    number of elements.
    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **label**: target tensor with the same size as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, weight=None, batch_axis=0, **kwargs):
        super(L1Loss, self).__init__(weight, batch_axis, **kwargs)
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        # Pick the numpy-style or legacy abs op to match the active API.
        abs_fn = F.np.abs if is_np_array() else F.abs
        label = _reshape_like(F, label, pred)
        loss = abs_fn(label - pred)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class SigmoidBinaryCrossEntropyLoss(Loss):
    r"""The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)
    BCE loss is useful when training logistic regression. If `from_sigmoid`
    is False (default), this loss computes:
    .. math::
        prob = \frac{1}{1 + \exp(-{pred})}
        L = - \sum_i {label}_i * \log({prob}_i) * pos\_weight +
            (1 - {label}_i) * \log(1 - {prob}_i)
    If `from_sigmoid` is True, this loss computes:
    .. math::
        L = - \sum_i {label}_i * \log({pred}_i) * pos\_weight +
            (1 - {label}_i) * \log(1 - {pred}_i)
    A tensor `pos_weight > 1` decreases the false negative count, hence increasing
    the recall.
    Conversely setting `pos_weight < 1` decreases the false positive count and
    increases the precision.
    `pred` and `label` can have arbitrary shape as long as they have the same
    number of elements.
    Parameters
    ----------
    from_sigmoid : bool, default is `False`
        Whether the input is from the output of sigmoid. Set this to false will make
        the loss calculate sigmoid and BCE together, which is more numerically
        stable through log-sum-exp trick.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **label**: target tensor with values in range `[0, 1]`. Must have the
          same size as `pred`.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
        - **pos_weight**: a weighting tensor of positive examples. Must be a vector with length
          equal to the number of classes.For example, if pred has shape (64, 10),
          pos_weight should have shape (1, 10).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):
        super(SigmoidBinaryCrossEntropyLoss, self).__init__(
            weight, batch_axis, **kwargs)
        self._from_sigmoid = from_sigmoid
    def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
        # Bind numpy-style or legacy operators once, up front.
        if is_np_array():
            relu_fn = F.npx.relu
            act_fn = F.npx.activation
            abs_fn = F.np.abs
            mul_fn = F.np.multiply
            log_fn = F.np.log
        else:
            relu_fn = F.relu
            act_fn = F.Activation
            abs_fn = F.abs
            mul_fn = F.broadcast_mul
            log_fn = F.log
        label = _reshape_like(F, label, pred)
        if not self._from_sigmoid:
            # Fused sigmoid + BCE, computed via log-sum-exp for stability.
            if pos_weight is None:
                # We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
                loss = relu_fn(pred) - pred * label + \
                    act_fn(-abs_fn(pred), act_type='softrelu')
            else:
                # We use the stable formula: x - x * z + (1 + z * pos_weight - z) * \
                #    (log(1 + exp(-abs(x))) + max(-x, 0))
                log_weight = 1 + mul_fn(pos_weight - 1, label)
                loss = pred - pred * label + log_weight * \
                    (act_fn(-abs_fn(pred), act_type='softrelu') + relu_fn(-pred))
        else:
            # Inputs are already probabilities; eps guards against log(0).
            eps = 1e-12
            if pos_weight is None:
                loss = -(log_fn(pred + eps) * label
                         + log_fn(1. - pred + eps) * (1. - label))
            else:
                loss = -(mul_fn(log_fn(pred + eps) * label, pos_weight)
                         + log_fn(1. - pred + eps) * (1. - label))
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
# Short alias matching the name advertised in the class docstring.
SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
class SoftmaxCrossEntropyLoss(Loss):
    r"""Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)
    If `sparse_label` is `True` (default), label should contain integer
    category indicators:
    .. math::
        \DeclareMathOperator{softmax}{softmax}
        p = \softmax({pred})
        L = -\sum_i \log p_{i,{label}_i}
    `label`'s shape should be `pred`'s shape with the `axis` dimension removed.
    i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape should
    be (1,2,4).
    If `sparse_label` is `False`, `label` should contain probability distribution
    and `label`'s shape should be the same with `pred`:
    .. math::
        p = \softmax({pred})
        L = -\sum_i \sum_j {label}_j \log p_{ij}
    Parameters
    ----------
    axis : int, default -1
        The axis to sum over when computing softmax and entropy.
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    from_logits : bool, default False
        Whether input is a log probability (usually from log_softmax) instead
        of unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: the prediction tensor, where the `batch_axis` dimension
          ranges over batch size and `axis` dimension ranges over the number
          of classes.
        - **label**: the truth tensor. When `sparse_label` is True, `label`'s
          shape should be `pred`'s shape with the `axis` dimension removed.
          i.e. for `pred` with shape (1,2,3,4) and `axis = 2`, `label`'s shape
          should be (1,2,4) and values should be integers between 0 and 2. If
          `sparse_label` is False, `label`'s shape must be the same as `pred`
          and values should be floats in the range `[0, 1]`.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
                 batch_axis=0, **kwargs):
        super(SoftmaxCrossEntropyLoss, self).__init__(
            weight, batch_axis, **kwargs)
        self._axis = axis
        self._sparse_label = sparse_label
        self._from_logits = from_logits
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute the softmax cross-entropy between `pred` and `label`."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            log_softmax_fn = F.npx.log_softmax
            pick_fn = F.npx.pick
        else:
            log_softmax_fn = F.log_softmax
            pick_fn = F.pick
        if not self._from_logits:
            # Normalize raw scores into log-probabilities first.
            pred = log_softmax_fn(pred, self._axis)
        if self._sparse_label:
            # Integer labels: gather the log-probability of the true class.
            loss = -pick_fn(pred, label, axis=self._axis, keepdims=True)
        else:
            # Dense labels: full cross-entropy against a distribution.
            label = _reshape_like(F, label, pred)
            loss = -(pred * label).sum(axis=self._axis, keepdims=True)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
# Short alias matching the name advertised in the class docstring.
SoftmaxCELoss = SoftmaxCrossEntropyLoss
class KLDivLoss(Loss):
    r"""The Kullback-Leibler divergence loss.
    KL divergence measures the distance between contiguous distributions. It
    can be used to minimize information loss when approximating a distribution.
    If `from_logits` is True (default), loss is defined as:
    .. math::
        L = \sum_i {label}_i * \big[\log({label}_i) - {pred}_i\big]
    If `from_logits` is False, loss is defined as:
    .. math::
        \DeclareMathOperator{softmax}{softmax}
        prob = \softmax({pred})
        L = \sum_i {label}_i * \big[\log({label}_i) - \log({prob}_i)\big]
    `label` and `pred` can have arbitrary shape as long as they have the same
    number of elements.
    Parameters
    ----------
    from_logits : bool, default is `True`
        Whether the input is log probability (usually from log_softmax) instead
        of unnormalized numbers.
    axis : int, default -1
        The dimension along with to compute softmax. Only used when `from_logits`
        is False.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape. If `from_logits` is
          True, `pred` should be log probabilities. Otherwise, it should be
          unnormalized predictions, i.e. from a dense layer.
        - **label**: truth tensor with values in range `(0, 1)`. Must have
          the same size as `pred`.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    References
    ----------
    `Kullback-Leibler divergence
    <https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_
    """
    def __init__(self, from_logits=True, axis=-1, weight=None, batch_axis=0,
                 **kwargs):
        super(KLDivLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_logits = from_logits
        self._axis = axis
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute KL(label || exp(pred)) element-wise, then batch-mean."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            log_softmax_fn = F.npx.log_softmax
            log_fn = F.np.log
        else:
            log_softmax_fn = F.log_softmax
            log_fn = F.log
        if not self._from_logits:
            # Convert raw scores to log-probabilities along `axis`.
            pred = log_softmax_fn(pred, self._axis)
        # 1e-12 epsilon keeps log() finite when a label entry is exactly 0.
        loss = label * (log_fn(label + 1e-12) - pred)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class CTCLoss(Loss):
    r"""Connectionist Temporal Classification Loss.
    Parameters
    ----------
    layout : str, default 'NTC'
        Layout of prediction tensor. 'N', 'T', 'C' stands for batch size,
        sequence length, and alphabet_size respectively.
    label_layout : str, default 'NT'
        Layout of the labels. 'N', 'T' stands for batch size, and sequence
        length respectively.
    weight : float or None
        Global scalar weight for loss.
    Inputs:
        - **pred**: unnormalized prediction tensor (before softmax).
          Its shape depends on `layout`. If `layout` is 'TNC', pred
          should have shape `(sequence_length, batch_size, alphabet_size)`.
          Note that in the last dimension, index `alphabet_size-1` is reserved
          for internal use as blank label. So `alphabet_size` is one plus the
          actual alphabet size.
        - **label**: zero-based label tensor. Its shape depends on `label_layout`.
          If `label_layout` is 'TN', `label` should have shape
          `(label_sequence_length, batch_size)`.
        - **pred_lengths**: optional (default None), used for specifying the
          length of each entry when different `pred` entries in the same batch
          have different lengths. `pred_lengths` should have shape `(batch_size,)`.
        - **label_lengths**: optional (default None), used for specifying the
          length of each entry when different `label` entries in the same batch
          have different lengths. `label_lengths` should have shape `(batch_size,)`.
    Outputs:
        - **loss**: output loss has shape `(batch_size,)`.
    **Example**: suppose the vocabulary is `[a, b, c]`, and in one batch we
    have three sequences 'ba', 'cbb', and 'abac'. We can index the labels as
    `{'a': 0, 'b': 1, 'c': 2, blank: 3}`. Then `alphabet_size` should be 4,
    where label 3 is reserved for internal use by `CTCLoss`. We then need to
    pad each sequence with `-1` to make a rectangular `label` tensor::
        [[1, 0, -1, -1],
         [2, 1, 1, -1],
         [0, 1, 0, 2]]
    References
    ----------
    `Connectionist Temporal Classification: Labelling Unsegmented
    Sequence Data with Recurrent Neural Networks
    <http://www.cs.toronto.edu/~graves/icml_2006.pdf>`_
    """
    def __init__(self, layout='NTC', label_layout='NT', weight=None, **kwargs):
        # Validate layouts up front; everything downstream assumes them.
        assert layout in ['NTC', 'TNC'],\
            "Only 'NTC' and 'TNC' layouts for pred are supported. Got: %s" % layout
        assert label_layout in ['NT', 'TN'],\
            "Only 'NT' and 'TN' layouts for label are supported. Got: %s" % label_layout
        self._layout = layout
        self._label_layout = label_layout
        # The batch axis is wherever 'N' sits in the label layout.
        batch_axis = label_layout.find('N')
        super(CTCLoss, self).__init__(weight, batch_axis, **kwargs)
    def hybrid_forward(self, F, pred, label,
                       pred_lengths=None, label_lengths=None, sample_weight=None):
        """Compute per-sequence CTC loss, transposing to TNC/NT as needed."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            swapaxes_fn = F.np.swapaxes
            ctc_fn = F.npx.ctc_loss
        else:
            swapaxes_fn = F.swapaxes
            ctc_fn = F.ctc_loss
        # The underlying ctc_loss operator expects time-major 'TNC' preds
        # and batch-major 'NT' labels, so transpose when configured otherwise.
        if self._layout == 'NTC':
            pred = swapaxes_fn(pred, 0, 1)
        if self._batch_axis == 1:
            label = swapaxes_fn(label, 0, 1)
        loss = ctc_fn(pred, label, pred_lengths, label_lengths,
                      use_data_lengths=pred_lengths is not None,
                      use_label_lengths=label_lengths is not None,
                      blank_label='last')
        return _apply_weighting(F, loss, self._weight, sample_weight)
class HuberLoss(Loss):
    r"""Calculates smoothed L1 loss that is equal to L1 loss if absolute error
    exceeds rho but is equal to L2 loss otherwise. Also called SmoothedL1 loss.
    .. math::
        L = \sum_i \begin{cases} \frac{1}{2 {rho}} ({label}_i - {pred}_i)^2 &
                           \text{ if } |{label}_i - {pred}_i| < {rho} \\
                           |{label}_i - {pred}_i| - \frac{{rho}}{2} &
                           \text{ otherwise }
            \end{cases}
    `label` and `pred` can have arbitrary shape as long as they have the same
    number of elements.
    Parameters
    ----------
    rho : float, default 1
        Threshold for trimmed mean estimator.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **label**: target tensor with the same size as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, rho=1, weight=None, batch_axis=0, **kwargs):
        super(HuberLoss, self).__init__(weight, batch_axis, **kwargs)
        # Transition point between the quadratic and linear regimes.
        self._rho = rho
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute the Huber (smoothed-L1) loss between `pred` and `label`."""
        # Resolve array-mode-specific operators once.
        use_np = is_np_array()
        abs_fn = F.np.abs if use_np else F.abs
        where_fn = F.np.where if use_np else F.where
        square_fn = F.np.square if use_np else F.square
        label = _reshape_like(F, label, pred)
        err = abs_fn(label - pred)
        # Linear penalty past rho, quadratic penalty inside it.
        linear_part = err - 0.5 * self._rho
        quadratic_part = (0.5 / self._rho) * square_fn(err)
        loss = where_fn(err > self._rho, linear_part, quadratic_part)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class HingeLoss(Loss):
    r"""Calculates the hinge loss function often used in SVMs:
    .. math::
        L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)
    where `pred` is the classifier prediction and `label` is the target tensor
    containing values -1 or 1. `label` and `pred` must have the same number of
    elements.
    Parameters
    ----------
    margin : float
        The margin in hinge loss. Defaults to 1.0
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape.
        - **label**: truth tensor with values -1 or 1. Must have the same size
          as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
        super(HingeLoss, self).__init__(weight, batch_axis, **kwargs)
        self._margin = margin
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute max(0, margin - pred * label), weighted and batch-averaged."""
        # Resolve the rectifier for the active array mode.
        if is_np_array():
            relu_fn = F.npx.relu
        else:
            relu_fn = F.relu
        label = _reshape_like(F, label, pred)
        # Positive only where the prediction violates the margin.
        margin_gap = self._margin - pred * label
        loss = relu_fn(margin_gap)
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class SquaredHingeLoss(Loss):
    r"""Calculates the soft-margin loss function used in SVMs:
    .. math::
        L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)^2
    where `pred` is the classifier prediction and `label` is the target tensor
    containing values -1 or 1. `label` and `pred` can have arbitrary shape as
    long as they have the same number of elements.
    Parameters
    ----------
    margin : float
        The margin in hinge loss. Defaults to 1.0
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **label**: truth tensor with values -1 or 1. Must have the same size
          as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
        super(SquaredHingeLoss, self).__init__(weight, batch_axis, **kwargs)
        self._margin = margin
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute max(0, margin - pred * label)^2, weighted and batch-averaged."""
        # Resolve array-mode-specific operators once.
        use_np = is_np_array()
        relu_fn = F.npx.relu if use_np else F.relu
        square_fn = F.np.square if use_np else F.square
        label = _reshape_like(F, label, pred)
        # Square the margin violation so large mistakes dominate.
        margin_gap = self._margin - pred * label
        loss = square_fn(relu_fn(margin_gap))
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class LogisticLoss(Loss):
    r"""Calculates the logistic loss (for binary losses only):
    .. math::
        L = \sum_i \log(1 + \exp(- {pred}_i \cdot {label}_i))
    where `pred` is the classifier prediction and `label` is the target tensor
    containing values -1 or 1 (0 or 1 if `label_format` is binary).
    `label` and `pred` can have arbitrary shape as long as they have the same number of elements.
    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    label_format : str, default 'signed'
        Can be either 'signed' or 'binary'. If the label_format is 'signed', all label values should
        be either -1 or 1. If the label_format is 'binary', all label values should be either
        0 or 1.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape.
        - **label**: truth tensor with values -1/1 (label_format is 'signed')
          or 0/1 (label_format is 'binary'). Must have the same size as pred.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: loss tensor with shape (batch_size,). Dimensions other than
          batch_axis are averaged out.
    """
    def __init__(self, weight=None, batch_axis=0, label_format='signed', **kwargs):
        super(LogisticLoss, self).__init__(weight, batch_axis, **kwargs)
        self._label_format = label_format
        if self._label_format not in ["signed", "binary"]:
            # Fixed typo in the user-facing message ("recieved" -> "received").
            raise ValueError("label_format can only be signed or binary, received %s."
                             % label_format)
    def hybrid_forward(self, F, pred, label, sample_weight=None):
        """Compute log(1 + exp(-pred * label)) via a numerically stable form."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            relu_fn = F.npx.relu
            act_fn = F.npx.activation
            abs_fn = F.np.abs
        else:
            relu_fn = F.relu
            act_fn = F.Activation
            abs_fn = F.abs
        label = _reshape_like(F, label, pred)
        if self._label_format == 'signed':
            label = (label + 1.0) / 2.0  # Transform label to be either 0 or 1
        # Stable BCE-with-logits form: max(x, 0) - x*z + log(1 + exp(-|x|)).
        loss = relu_fn(pred) - pred * label + \
            act_fn(-abs_fn(pred), act_type='softrelu')
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class TripletLoss(Loss):
    r"""Calculates triplet loss given three input tensors and a positive margin.
    Triplet loss measures the relative similarity between a positive
    example, a negative example, and prediction:
    .. math::
        L = \sum_i \max(\Vert {pos_i}_i - {pred} \Vert_2^2 -
                        \Vert {neg_i}_i - {pred} \Vert_2^2 + {margin}, 0)
    `positive`, `negative`, and `pred` can have arbitrary shape as long as they
    have the same number of elements.
    Parameters
    ----------
    margin : float
        Margin of separation between correct and incorrect pair.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **pred**: prediction tensor with arbitrary shape
        - **positive**: positive example tensor with arbitrary shape. Must have
          the same size as pred.
        - **negative**: negative example tensor with arbitrary shape Must have
          the same size as pred.
    Outputs:
        - **loss**: loss tensor with shape (batch_size,).
    """
    def __init__(self, margin=1, weight=None, batch_axis=0, **kwargs):
        super(TripletLoss, self).__init__(weight, batch_axis, **kwargs)
        self._margin = margin
    def hybrid_forward(self, F, pred, positive, negative, sample_weight=None):
        """Compute hinge on (d(pred, positive) - d(pred, negative) + margin)."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            relu_fn = F.npx.relu
            square_fn = F.np.square
        else:
            relu_fn = F.relu
            square_fn = F.square
        positive = _reshape_like(F, positive, pred)
        negative = _reshape_like(F, negative, pred)
        # Per-sample squared-distance difference, summed over non-batch axes.
        loss = _batch_sum(F, square_fn(positive - pred) - square_fn(negative - pred), self._batch_axis)
        # Clamp at zero: no penalty once the margin is satisfied.
        loss = relu_fn(loss + self._margin)
        return _apply_weighting(F, loss, self._weight, sample_weight)
class PoissonNLLLoss(Loss):
    r"""For a target (Random Variable) in a Poisson distribution, the function calculates the Negative
    Log likelihood loss.
    PoissonNLLLoss measures the loss accrued from a poisson regression prediction made by the model.
    .. math::
        L = \text{pred} - \text{target} * \log(\text{pred}) +\log(\text{target!})
    `target`, `pred` can have arbitrary shape as long as they have the same number of elements.
    Parameters
    ----------
    from_logits : boolean, default True
        indicating whether log(predicted) value has already been computed. If True, the loss is computed as
        :math:`\exp(\text{pred}) - \text{target} * \text{pred}`, and if False, then loss is computed as
        :math:`\text{pred} - \text{target} * \log(\text{pred}+\text{epsilon})`.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    compute_full: boolean, default False
        Indicates whether to add an approximation(Stirling factor) for the Factorial term in the formula for the loss.
        The Stirling factor is:
        :math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`
    epsilon: float, default 1e-08
        This is to avoid calculating log(0) which is not defined.
    Inputs:
        - **pred**: Predicted value
        - **target**: Random variable(count or number) which belongs to a Poisson distribution.
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as pred. For example, if pred has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: Average loss (shape=(1,1)) of the loss tensor with shape (batch_size,).
    """
    def __init__(self, weight=None, from_logits=True, batch_axis=0, compute_full=False, **kwargs):
        super(PoissonNLLLoss, self).__init__(weight, batch_axis, **kwargs)
        self._from_logits = from_logits
        self._compute_full = compute_full
    def hybrid_forward(self, F, pred, target, sample_weight=None, epsilon=1e-08):
        """Compute the Poisson negative log-likelihood of `target` under `pred`."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            exp_fn = F.np.exp
            log_fn = F.np.log
        else:
            exp_fn = F.exp
            log_fn = F.log
        target = _reshape_like(F, target, pred)
        if self._from_logits:
            # `pred` is log-rate: exp(pred) - target * pred.
            loss = exp_fn(pred) - target * pred
        else:
            # `pred` is the rate itself; epsilon guards log(0).
            loss = pred - target * log_fn(pred + epsilon)
        if self._compute_full:
            # Using numpy's pi value
            stirling_factor = target * \
                log_fn(target) - target + 0.5 * log_fn(2 * target * np.pi)
            # Stirling's approximation is only applied where target > 1;
            # the boolean mask zeroes it out elsewhere.
            target_gt_1 = target > 1
            stirling_factor = stirling_factor * target_gt_1
            loss = loss + stirling_factor
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
class CosineEmbeddingLoss(Loss):
    r"""For a target label 1 or -1, vectors input1 and input2, the function computes the cosine distance
    between the vectors. This can be interpreted as how similar/dissimilar two input vectors are.
    .. math::
        L = \sum_i \begin{cases} 1 - {cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = 1\\
                         {cos\_sim({input1}_i, {input2}_i)} & \text{ if } {label}_i = -1 \end{cases}\\
        cos\_sim(input1, input2) = \frac{{input1}_i.{input2}_i}{||{input1}_i||.||{input2}_i||}
    `input1`, `input2` can have arbitrary shape as long as they have the same number of elements.
    Parameters
    ----------
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    margin : float
        Margin of separation between correct and incorrect pair.
    Inputs:
        - **input1**: a tensor with arbitrary shape
        - **input2**: another tensor with same shape as pred to which input1 is
          compared for similarity and loss calculation
        - **label**: A 1-D tensor indicating for each pair input1 and input2, target label is 1 or -1
        - **sample_weight**: element-wise weighting tensor. Must be broadcastable
          to the same shape as input1. For example, if input1 has shape (64, 10)
          and you want to weigh each sample in the batch separately,
          sample_weight should have shape (64, 1).
    Outputs:
        - **loss**: The loss tensor with shape (batch_size,).
    """
    def __init__(self, weight=None, batch_axis=0, margin=0, **kwargs):
        super(CosineEmbeddingLoss, self).__init__(weight, batch_axis, **kwargs)
        self._margin = margin
    def hybrid_forward(self, F, input1, input2, label, sample_weight=None):
        """Compute the cosine-embedding loss for each (input1, input2, label)."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            where_fn = F.np.where
            clip_fn = F.np.clip
        else:
            where_fn = F.where
            clip_fn = F.clip
        input1 = _reshape_like(F, input1, input2)
        cos_sim = self._cosine_similarity(F, input1, input2)
        label = _reshape_like(F, label, cos_sim)
        # label == 1: penalize dissimilarity (1 - cos_sim).
        # otherwise:  penalize similarity above the margin, clipped to >= 0.
        loss = where_fn(label == 1,
                        1 - cos_sim,
                        clip_fn(cos_sim - self._margin, 0, 1 - self._margin))
        loss = _apply_weighting(F, loss, self._weight, sample_weight)
        return _batch_mean(F, loss, self._batch_axis)
    def _cosine_similarity(self, F, x, y, axis=-1):
        """Return the column vector of cosine similarities between rows of x and y."""
        # Select operators for the active array mode (numpy vs legacy).
        if is_np_array():
            reshape_fn = F.npx.reshape
            norm_fn = F.npx.norm
            sum_fn = F.np.sum
            full_fn = F.np.full
            max_fn = F.np.maximum
        else:
            reshape_fn = F.reshape
            norm_fn = F.norm
            sum_fn = F.sum
            full_fn = F.full
            max_fn = F.broadcast_maximum
        # Calculates the cosine similarity between 2 vectors
        x_norm = reshape_fn(norm_fn(x, axis=axis), (-1, 1))
        y_norm = reshape_fn(norm_fn(y, axis=axis), (-1, 1))
        x_dot_y = reshape_fn(sum_fn(x * y, axis=axis), (-1, 1))
        # Clamp the denominator at 1e-12 so zero-norm inputs don't divide by 0.
        eps_arr = full_fn((1, 1), 1e-12)
        return (x_dot_y / max_fn(x_norm * y_norm, eps_arr))
class SDMLLoss(Loss):
    r"""Calculates Batchwise Smoothed Deep Metric Learning (SDML) Loss given two input tensors and a smoothing weight
    SDM Loss learns similarity between paired samples by using unpaired samples in the minibatch
    as potential negative examples.
    The loss is described in greater detail in
    "Large Scale Question Paraphrase Retrieval with Smoothed Deep Metric Learning."
    - by Bonadiman, Daniele, Anjishnu Kumar, and Arpit Mittal.  arXiv preprint arXiv:1905.12786 (2019).
    URL: https://arxiv.org/pdf/1905.12786.pdf
    According to the authors, this loss formulation achieves comparable or higher accuracy to
    Triplet Loss but converges much faster.
    The loss assumes that the items in both tensors in each minibatch
    are aligned such that x1[0] corresponds to x2[0] and all other datapoints in the minibatch are unrelated.
    `x1` and  `x2` are minibatches of vectors.
    Parameters
    ----------
    smoothing_parameter : float
        Probability mass to be distributed over the minibatch. Must be < 1.0.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    Inputs:
        - **x1**: Minibatch of data points with shape (batch_size, vector_dim)
        - **x2**: Minibatch of data points with shape (batch_size, vector_dim)
          Each item in x2 is a positive sample for the same index in x1.
          That is, x1[0] and x2[0] form a positive pair, x1[1] and x2[1] form a positive pair - and so on.
          All data points in different rows should be decorrelated
    Outputs:
        - **loss**: loss tensor with shape (batch_size,).
    """
    def __init__(self, smoothing_parameter=0.3, weight=1., batch_axis=0, **kwargs):
        super(SDMLLoss, self).__init__(weight, batch_axis, **kwargs)
        # KL divergence over log-probabilities vs the smoothed identity target.
        self.kl_loss = KLDivLoss(from_logits=True)
        # Smoothing probability mass
        self.smoothing_parameter = smoothing_parameter
    def _compute_distances(self, F, x1, x2):
        """
        This function computes the euclidean distance between every vector
        in the two batches in input.
        """
        if is_np_array():
            expand_dims_fn = F.np.expand_dims
        else:
            expand_dims_fn = F.expand_dims
        # expanding x1 form [batch_size, dim] to [batch_size, 1, dim]
        # and x2 to [1, batch_size, dim]
        x1_ = expand_dims_fn(x1, 1)
        x2_ = expand_dims_fn(x2, 0)
        # pointwise squared differences
        squared_diffs = (x1_ - x2_)**2
        # sum of squared differences distance
        return squared_diffs.sum(axis=2)
    def _compute_labels(self, F, batch_size):
        """
        The function creates the label matrix for the loss.
        It is an identity matrix of size [BATCH_SIZE x BATCH_SIZE]
        labels:
            [[1, 0]
             [0, 1]]
        after the process the labels are smoothed by a small amount to
        account for errors.
        labels:
            [[0.9, 0.1]
             [0.1, 0.9]]
        Pereyra, Gabriel, et al. "Regularizing neural networks by penalizing
        confident output distributions." arXiv preprint arXiv:1701.06548 (2017).
        """
        # NOTE(review): F.eye is the legacy-ndarray API; under numpy mode this
        # would presumably need F.np.eye — confirm against the array mode used
        # by callers.
        gold = F.eye(batch_size)
        labels = gold * (1 - self.smoothing_parameter) + (1 - gold) * self.smoothing_parameter / (batch_size - 1)
        return labels
    def hybrid_forward(self, F, x1, x2):
        """
        the function computes the kl divergence between the negative distances
        (internally it compute a softmax casting into probabilities) and the
        identity matrix.
        This assumes that the two batches are aligned therefore the more similar
        vector should be the one having the same id.
        Batch1                                Batch2
        President of France                   French President
        President of US                       American President
        Given the question president of France in batch 1 the model will
        learn to predict french president comparing it with all the other
        vectors in batch 2
        """
        # Imperative-only: x1.shape and .as_in_context below require ndarray.
        assert F is ndarray, 'SDMLLoss does not support symbolic '
        if is_np_array():
            log_softmax_fn = F.npx.log_softmax
        else:
            log_softmax_fn = F.log_softmax
        batch_size = x1.shape[0]
        labels = self._compute_labels(F, batch_size)
        distances = self._compute_distances(F, x1, x2)
        # Smaller distance -> larger probability after softmax over -distances.
        log_probabilities = log_softmax_fn(-distances, axis=1)
        # multiply for the number of labels to obtain the correct loss (gluon kl_loss averages instead of sum)
        # PR#18423:multiply for the number of labels should multiply x1.shape[1] rather than x1.shape[0])
        # After PR#18423, it is no need to multiply it anymore.
        return self.kl_loss(log_probabilities, labels.as_in_context(distances.context))
| apache-2.0 |
SDSG-Invenio/invenio | invenio/modules/tickets/testsuite/test_system_rt.py | 13 | 2559 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for bibcatalog_system_rt library."""
from invenio.base.wrappers import lazy_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
bibcatalog_system_rt = lazy_import('invenio.legacy.bibcatalog.system_rt')
class BibCatalogSystemRTTest(InvenioTestCase):
    """Testing of BibCatalog."""
    def setUp(self):
        # Point the RT backend at throwaway credentials and a non-existent
        # host; the tests below only exercise local command plumbing.
        bibcatalog_system_rt.CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_USER = 'testuser'
        bibcatalog_system_rt.CFG_BIBCATALOG_SYSTEM_RT_DEFAULT_PWD = 'testpass'
        bibcatalog_system_rt.CFG_BIBCATALOG_SYSTEM_RT_URL = 'http://testingdomainbadbad.invenio-software.org'
        self.rt = bibcatalog_system_rt.BibCatalogSystemRT()
    def tearDown(self):
        # Nothing to clean up; setUp reassigns module-level config each run.
        pass
    def test_rt_run_command_fails_with_bum_environment(self):
        """bibcatalog_system_rt - _run_rt_command gives None for bad environment"""
        # A special kind of test requires a very weird environment
        bibcatalog_system_rt.CFG_BIBCATALOG_SYSTEM_RT_URL = None
        testobj = bibcatalog_system_rt.BibCatalogSystemRT()
        stdout = testobj._run_rt_command('/bin/ls /')
        # Restore the URL so later tests are unaffected by this mutation.
        bibcatalog_system_rt.CFG_BIBCATALOG_SYSTEM_RT_URL = 'http://testingdomainbadbad.invenio-software.org'
        self.assertEquals(stdout, None)
    def test_rt_run_command(self):
        """bibcatalog_system_rt - running simple command."""
        stdout = self.rt._run_rt_command('/bin/ls /')
        self.assertTrue(len(stdout) > 0)
    def test_rt_run_command_exception_bad_cmd(self):
        """bibcatalog_system_rt - bad command execution raises exception"""
        self.assertRaises(ValueError, self.rt._run_rt_command, '/etc/hosts')
# Collect the test cases and allow running this module as a script.
TEST_SUITE = make_test_suite(BibCatalogSystemRTTest)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
joulecoin/joulecoin | contrib/devtools/symbol-check.py | 149 | 4348 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function
import subprocess
import re
import sys
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
# Highest allowed symbol version per library (see derivation above).
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '_init', '__bss_start', '_fini'
}
# External tools used to inspect and demangle ELF symbols.
READELF_CMD = '/usr/bin/readelf'
CPPFILT_CMD = '/usr/bin/c++filt'
class CPPFilt(object):
    '''
    Demangle C++ symbol names.

    Use a pipe to the 'c++filt' command. One instance keeps a single
    long-lived child process; call close() when done with it.
    '''
    def __init__(self):
        self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def __call__(self, mangled):
        '''Return the demangled form of *mangled* (one symbol per call).'''
        self.proc.stdin.write(mangled + '\n')
        # Flush so c++filt actually receives the line. With a buffered pipe
        # the readline() below would otherwise deadlock waiting for output
        # that c++filt never got.
        self.proc.stdin.flush()
        return self.proc.stdout.readline().rstrip()

    def close(self):
        '''Shut down the c++filt child process and release its pipes.'''
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()
def read_symbols(executable, imports=True):
    '''
    Parse an ELF executable and return a list of (symbol,version) tuples
    for dynamic symbols: imported ones when imports=True (the default),
    exported ones when imports=False.

    Raises IOError if readelf exits with a non-zero status.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
    syms = []
    for line in stdout.split('\n'):
        line = line.split()
        # Symbol rows start with an index like '123:'; line[7] holds
        # 'name@version' and line[6] == 'UND' marks an undefined (imported) symbol.
        if len(line)>7 and re.match('[0-9]+:$', line[0]):
            (sym, _, version) = line[7].partition('@')
            is_import = line[6] == 'UND'
            # A default-version symbol 'name@@version' leaves a leading '@'
            # after the partition above; strip it off.
            if version.startswith('@'):
                version = version[1:]
            if is_import == imports:
                syms.append((sym, version))
    return syms
def check_version(max_versions, version):
    '''
    Check a versioned symbol suffix such as 'GLIBC_2.11' against the policy.

    Returns True when the library prefix is known and its dotted version is
    at or below the maximum in max_versions, False otherwise.
    '''
    if '_' in version:
        (lib, _, ver) = version.rpartition('_')
    else:
        lib = version
        ver = '0'
    # Reject unknown libraries before parsing the number. This also avoids a
    # ValueError crash on symbols whose suffix after '_' is not a dotted
    # number (the original parsed first and raised in that case).
    if not lib in max_versions:
        return False
    ver = tuple([int(x) for x in ver.split('.')])
    return ver <= max_versions[lib]
if __name__ == '__main__':
    # Scan every executable named on the command line; exit non-zero if any
    # imported symbol exceeds the allowed versions or an unexpected symbol
    # is exported.
    cppfilt = CPPFilt()
    retval = 0
    for filename in sys.argv[1:]:
        # Check imported symbols
        for sym,version in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version):
                print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
                retval = 1
        # Check exported symbols (only the standard linker-provided ones in
        # IGNORE_EXPORTS are permitted)
        for sym,version in read_symbols(filename, False):
            if sym in IGNORE_EXPORTS:
                continue
            print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
            retval = 1
    exit(retval)
| mit |
dimuthud/carbon-platform-integration | deployment-automation-framework/python_scripts/load_deployment_config.py | 5 | 2841 | #!/usr/bin/env python
"""Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
WSO2 Inc. licenses this file to you under the Apache License,
Version 2.0 (the "License"); you may not use this file except
in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
"""This script extracts the required configuration information
from the deployment.cfg config file
This is needed prior spawning instances in OpenStack, EC2, etc...
"""
import ConfigParser
import collections

# Global variables
# allow_no_value set to true since the server list may be
# recorded without any value assigned when env=openstack
config = ConfigParser.RawConfigParser(allow_no_value=True)
# Parse deployment.cfg from the current working directory; 'read' holds the
# list of filenames that were successfully parsed (empty if the file is missing).
read = config.read('deployment.cfg')
# Get environment
def get_environment():
    """Return the target environment name from [environment] env."""
    return config.get('environment', 'env')

# Load environment configuration
# OpenStack related configuration parameters
def get_openstack_image():
    """Return the OpenStack image name from [envconfig] image."""
    return config.get('envconfig', 'image')

def get_openstack_flavor():
    """Return the OpenStack flavor from [envconfig] flavor."""
    return config.get('envconfig', 'flavor')

def get_openstack_network():
    """Return the OpenStack network from [envconfig] network."""
    return config.get('envconfig', 'network')

def get_openstack_instance_password():
    """Return the instance password from [envconfig] instancePassword."""
    return config.get('envconfig', 'instancePassword')

def get_openstack_key_pair():
    """Return the key pair name from [envconfig] keyPair."""
    return config.get('envconfig', 'keyPair')
# Load server list from config file
def load_server_config():
    """Return the list of node names from the [nodes] section of deployment.cfg.

    Node keys may be written as 'name index' (e.g. 'elb 1'); only the name
    part before the space is kept. Insertion order is preserved via
    OrderedDict. The print statements are debug output.
    """
    server_list = []
    # Put node list in to an ordered dictionary object
    # under section [nodes] in deployment.cfg file
    ordered_dictionary = collections.OrderedDict(config.items('nodes'))
    print ordered_dictionary
    # For each node name append to serverList array
    # for node, ip in orderedDic.iteritems():
    # serverList.append(node)
    for node, ip in ordered_dictionary.iteritems():
        node_values = node.split(" ")
        print node_values # output -> ['elb', '1']
        if len(node_values) > 1:
            print node_values[0] # output -> elb
            server_list.append(node_values[0])
        else:
            server_list.append(node)
    # Return the server list name array
    print server_list
    return server_list
# This block will only get executed when running directly
# This can be used to test config file structure, data retrieval and experimentation
if __name__ == '__main__':
    try:
        serverList = load_server_config()
        print serverList
    # NOTE(review): BaseException also swallows KeyboardInterrupt/SystemExit;
    # confirm that breadth is intended for this debug entry point.
    except BaseException as b:
        print 'Exception in load_deployment_config: ', b
| apache-2.0 |
xbmcmegapack/plugin.video.megapack.dev | resources/lib/menus/home_languages_herero.py | 1 | 1109 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Herero():
    '''Class that manages this specific menu context.'''

    def open(self, plugin, menu):
        """Populate *menu* with every xplugin offering Herero-language content
        across the listed content dictionaries."""
        menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
            "Events", "Live", "Movies", "Sports", "TVShows"],
            languages=["Herero"]))
ver007/stem-1.4.1 | test/unit/descriptor/extrainfo_descriptor.py | 3 | 27230 | """
Unit tests for stem.descriptor.extrainfo_descriptor.
"""
import datetime
import unittest
import stem.descriptor
from stem.descriptor.extrainfo_descriptor import RelayExtraInfoDescriptor, DirResponse, DirStat
from test.mocking import get_relay_extrainfo_descriptor, get_bridge_extrainfo_descriptor, CRYPTO_BLOB
from test.unit.descriptor import get_resource
class TestExtraInfoDescriptor(unittest.TestCase):
  """
  Tests for parsing relay and bridge extra-info descriptors, covering both
  real descriptors fetched from metrics and synthetic descriptors built by
  the test.mocking helpers with individually malformed fields.
  """

  def test_metrics_relay_descriptor(self):
    """
    Parses and checks our results against an extrainfo descriptor from metrics.
    """

    descriptor_file = open(get_resource('extrainfo_relay_descriptor'), 'rb')

    expected_signature = """-----BEGIN SIGNATURE-----
K5FSywk7qvw/boA4DQcqkls6Ize5vcBYfhQ8JnOeRQC9+uDxbnpm3qaYN9jZ8myj
k0d2aofcVbHr4fPQOSST0LXDrhFl5Fqo5um296zpJGvRUeO6S44U/EfJAGShtqWw
7LZqklu+gVvhMKREpchVqlAwXkWR44VENm24Hs+mT3M=
-----END SIGNATURE-----"""

    desc = next(stem.descriptor.parse_file(descriptor_file, 'extra-info 1.0'))
    self.assertEqual('NINJA', desc.nickname)
    self.assertEqual('B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48', desc.fingerprint)
    self.assertEqual(datetime.datetime(2012, 5, 5, 17, 3, 50), desc.published)
    self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.read_history_end)
    self.assertEqual(900, desc.read_history_interval)
    self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.write_history_end)
    self.assertEqual(900, desc.write_history_interval)
    self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.dir_read_history_end)
    self.assertEqual(900, desc.dir_read_history_interval)
    self.assertEqual(datetime.datetime(2012, 5, 5, 17, 2, 45), desc.dir_write_history_end)
    self.assertEqual(900, desc.dir_write_history_interval)
    self.assertEqual(expected_signature, desc.signature)
    self.assertEqual('00A57A9AAB5EA113898E2DD02A755E31AFC27227', desc.digest())
    self.assertEqual([], desc.get_unrecognized_lines())

    # The read-history, write-history, dirreq-read-history, and
    # dirreq-write-history lines are pretty long so just checking
    # the initial contents for the line and parsed values.

    read_values_start = [3309568, 9216, 41984, 27648, 123904]
    self.assertEqual(read_values_start, desc.read_history_values[:5])

    write_values_start = [1082368, 19456, 50176, 272384, 485376]
    self.assertEqual(write_values_start, desc.write_history_values[:5])

    dir_read_values_start = [0, 0, 0, 0, 33792, 27648, 48128]
    self.assertEqual(dir_read_values_start, desc.dir_read_history_values[:7])

    dir_write_values_start = [0, 0, 0, 227328, 349184, 382976, 738304]
    self.assertEqual(dir_write_values_start, desc.dir_write_history_values[:7])

  def test_metrics_bridge_descriptor(self):
    """
    Parses and checks our results against an extrainfo bridge descriptor from
    metrics.
    """

    descriptor_file = open(get_resource('extrainfo_bridge_descriptor'), 'rb')

    expected_dir_v2_responses = {
      DirResponse.OK: 0,
      DirResponse.UNAVAILABLE: 0,
      DirResponse.NOT_FOUND: 0,
      DirResponse.NOT_MODIFIED: 0,
      DirResponse.BUSY: 0,
    }

    expected_dir_v3_responses = {
      DirResponse.OK: 72,
      DirResponse.NOT_ENOUGH_SIGS: 0,
      DirResponse.UNAVAILABLE: 0,
      DirResponse.NOT_FOUND: 0,
      DirResponse.NOT_MODIFIED: 0,
      DirResponse.BUSY: 0,
    }

    desc = next(stem.descriptor.parse_file(descriptor_file, 'bridge-extra-info 1.0'))
    self.assertEqual('ec2bridgereaac65a3', desc.nickname)
    self.assertEqual('1EC248422B57D9C0BD751892FE787585407479A4', desc.fingerprint)
    self.assertEqual(datetime.datetime(2012, 6, 8, 2, 21, 27), desc.published)
    self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.read_history_end)
    self.assertEqual(900, desc.read_history_interval)
    self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.write_history_end)
    self.assertEqual(900, desc.write_history_interval)
    self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.dir_read_history_end)
    self.assertEqual(900, desc.dir_read_history_interval)
    self.assertEqual(datetime.datetime(2012, 6, 8, 2, 10, 38), desc.dir_write_history_end)
    self.assertEqual(900, desc.dir_write_history_interval)
    self.assertEqual('00A2AECCEAD3FEE033CFE29893387143146728EC', desc.digest())
    self.assertEqual([], desc.get_unrecognized_lines())

    read_values_start = [337920, 437248, 3995648, 48726016]
    self.assertEqual(read_values_start, desc.read_history_values[:4])

    write_values_start = [343040, 991232, 5649408, 49548288]
    self.assertEqual(write_values_start, desc.write_history_values[:4])

    dir_read_values_start = [0, 71680, 99328, 25600]
    self.assertEqual(dir_read_values_start, desc.dir_read_history_values[:4])

    dir_write_values_start = [5120, 664576, 2419712, 578560]
    self.assertEqual(dir_write_values_start, desc.dir_write_history_values[:4])

    self.assertEqual({}, desc.dir_v2_requests)
    self.assertEqual({}, desc.dir_v3_requests)
    self.assertEqual(expected_dir_v2_responses, desc.dir_v2_responses)
    self.assertEqual(expected_dir_v3_responses, desc.dir_v3_responses)
    self.assertEqual({}, desc.dir_v2_responses_unknown)
    # NOTE(review): duplicated assertion below - the second line presumably
    # was meant to check dir_v3_responses_unknown; confirm and fix upstream.
    self.assertEqual({}, desc.dir_v2_responses_unknown)

  def test_multiple_metrics_bridge_descriptors(self):
    """
    Check that we can read bridge descriptors when there's multiple in a file.
    """

    descriptor_file = open(get_resource('extrainfo_bridge_descriptor_multiple'), 'rb')
    desc_list = list(stem.descriptor.parse_file(descriptor_file))

    self.assertEqual(6, len(desc_list))
    self.assertEqual('909B07DB17E21D263C55794AB815BF1DB195FDD9', desc_list[0].fingerprint)
    self.assertEqual('7F7798A3CBB0F643B1CFCE3FD4F2B7C553764498', desc_list[1].fingerprint)
    self.assertEqual('B4869206C1EEA4A090FE614155BD6942701F80F1', desc_list[2].fingerprint)
    self.assertEqual('C18896EB6274DC8123491FAE1DD17E1769C54C4F', desc_list[3].fingerprint)
    self.assertEqual('478B4CB438302981DE9AAF246F48DBE57F69050A', desc_list[4].fingerprint)
    self.assertEqual('25D9D52A0350B42E69C8AB7CE945DB1CA38DA0CF', desc_list[5].fingerprint)

  def test_minimal_extrainfo_descriptor(self):
    """
    Basic sanity check that we can parse an extrainfo descriptor with minimal
    attributes.
    """

    desc = get_relay_extrainfo_descriptor()
    self.assertEqual('ninja', desc.nickname)
    self.assertEqual('B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48', desc.fingerprint)
    self.assertTrue(CRYPTO_BLOB in desc.signature)

  def test_unrecognized_line(self):
    """
    Includes unrecognized content in the descriptor.
    """

    desc = get_relay_extrainfo_descriptor({'pepperjack': 'is oh so tasty!'})
    self.assertEqual(['pepperjack is oh so tasty!'], desc.get_unrecognized_lines())

  def test_proceeding_line(self):
    """
    Includes a line prior to the 'extra-info' entry.
    """

    desc_text = b'exit-streams-opened port=80\n' + get_relay_extrainfo_descriptor(content = True)
    self._expect_invalid_attr(desc_text)

  def test_trailing_line(self):
    """
    Includes a line after the 'router-signature' entry.
    """

    desc_text = get_relay_extrainfo_descriptor(content = True) + b'\nexit-streams-opened port=80'
    self._expect_invalid_attr(desc_text)

  def test_extrainfo_line_missing_fields(self):
    """
    Checks that validation catches when the extra-info line is missing fields
    and that without validation both the nickname and fingerprint are left as
    None.
    """

    test_entries = (
      'ninja',
      'ninja ',
      'B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48',
      ' B2289C3EAB83ECD6EB916A2F481A02E6B76A0A48',
    )

    for entry in test_entries:
      desc_text = get_relay_extrainfo_descriptor({'extra-info': entry}, content = True)
      desc = self._expect_invalid_attr(desc_text, 'nickname')
      self.assertEqual(None, desc.nickname)
      self.assertEqual(None, desc.fingerprint)

  def test_geoip_db_digest(self):
    """
    Parses the geoip-db-digest and geoip6-db-digest lines with valid and
    invalid data.
    """

    geoip_db_digest = '916A3CA8B7DF61473D5AE5B21711F35F301CE9E8'
    desc = get_relay_extrainfo_descriptor({'geoip-db-digest': geoip_db_digest})
    self.assertEqual(geoip_db_digest, desc.geoip_db_digest)

    desc = get_relay_extrainfo_descriptor({'geoip6-db-digest': geoip_db_digest})
    self.assertEqual(geoip_db_digest, desc.geoip6_db_digest)

    # Invalid digests: empty, too short, too long, and non-hex characters.
    test_entries = (
      '',
      '916A3CA8B7DF61473D5AE5B21711F35F301CE9E',
      '916A3CA8B7DF61473D5AE5B21711F35F301CE9E88',
      '916A3CA8B7DF61473D5AE5B21711F35F301CE9EG',
      '916A3CA8B7DF61473D5AE5B21711F35F301CE9E-',
    )

    for entry in test_entries:
      desc_text = get_relay_extrainfo_descriptor({'geoip-db-digest': entry}, content = True)
      self._expect_invalid_attr(desc_text, 'geoip_db_digest')

      desc_text = get_relay_extrainfo_descriptor({'geoip6-db-digest': entry}, content = True)
      self._expect_invalid_attr(desc_text, 'geoip6_db_digest')

  def test_cell_circuits_per_decile(self):
    """
    Parses the cell-circuits-per-decile line with valid and invalid data.
    """

    # NOTE(review): this tuple is shadowed by the reassignment below and the
    # loop that follows iterates over literals instead - dead code upstream.
    test_entries = (
      ('0', 0),
      ('11', 11),
    )

    for entry in ('0', '11', '25'):
      desc = get_relay_extrainfo_descriptor({'cell-circuits-per-decile': entry})
      self.assertEqual(int(entry), desc.cell_circuits_per_decile)

    test_entries = (
      '',
      ' ',
      '-5',
      'blarg',
    )

    for entry in test_entries:
      desc_text = get_relay_extrainfo_descriptor({'cell-circuits-per-decile': entry}, content = True)
      self._expect_invalid_attr(desc_text, 'cell_circuits_per_decile')

  def test_dir_response_lines(self):
    """
    Parses the dirreq-v2-resp and dirreq-v3-resp lines with valid and invalid
    data.
    """

    for keyword in ('dirreq-v2-resp', 'dirreq-v3-resp'):
      # Derive the attribute names, e.g. 'dirreq-v2-resp' -> 'dir_v2_responses'.
      attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('resp', 'responses')
      unknown_attr = attr + '_unknown'

      test_value = 'ok=0,unavailable=0,not-found=984,not-modified=0,something-new=7'
      desc = get_relay_extrainfo_descriptor({keyword: test_value})
      self.assertEqual(0, getattr(desc, attr)[DirResponse.OK])
      self.assertEqual(0, getattr(desc, attr)[DirResponse.UNAVAILABLE])
      self.assertEqual(984, getattr(desc, attr)[DirResponse.NOT_FOUND])
      self.assertEqual(0, getattr(desc, attr)[DirResponse.NOT_MODIFIED])
      self.assertEqual(7, getattr(desc, unknown_attr)['something-new'])

      test_entries = (
        'ok=-4',
        'ok:4',
        'ok=4.not-found=3',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        desc = self._expect_invalid_attr(desc_text)
        self.assertEqual(None, getattr(desc, attr))
        self.assertEqual(None, getattr(desc, unknown_attr))

  def test_dir_stat_lines(self):
    """
    Parses the dirreq-v2-direct-dl, dirreq-v3-direct-dl, dirreq-v2-tunneled-dl,
    and dirreq-v3-tunneled-dl lines with valid and invalid data.
    """

    # NOTE(review): 'dirreq-v2-direct-dl' and 'dirreq-v2-tunneled-dl' each
    # appear twice; the v3 variants named in the docstring are never
    # exercised - confirm whether this duplication is intentional.
    for keyword in ('dirreq-v2-direct-dl', 'dirreq-v2-direct-dl', 'dirreq-v2-tunneled-dl', 'dirreq-v2-tunneled-dl'):
      attr = keyword.replace('-', '_').replace('dirreq', 'dir')
      unknown_attr = attr + '_unknown'

      test_value = 'complete=2712,timeout=32,running=4,min=741,d1=14507,d2=22702,q1=28881,d3=38277,d4=73729,md=111455,d6=168231,d7=257218,q3=319833,d8=390507,d9=616301,something-new=11,max=29917857'
      desc = get_relay_extrainfo_descriptor({keyword: test_value})
      self.assertEqual(2712, getattr(desc, attr)[DirStat.COMPLETE])
      self.assertEqual(32, getattr(desc, attr)[DirStat.TIMEOUT])
      self.assertEqual(4, getattr(desc, attr)[DirStat.RUNNING])
      self.assertEqual(741, getattr(desc, attr)[DirStat.MIN])
      self.assertEqual(14507, getattr(desc, attr)[DirStat.D1])
      self.assertEqual(22702, getattr(desc, attr)[DirStat.D2])
      self.assertEqual(28881, getattr(desc, attr)[DirStat.Q1])
      self.assertEqual(38277, getattr(desc, attr)[DirStat.D3])
      self.assertEqual(73729, getattr(desc, attr)[DirStat.D4])
      self.assertEqual(111455, getattr(desc, attr)[DirStat.MD])
      self.assertEqual(168231, getattr(desc, attr)[DirStat.D6])
      self.assertEqual(257218, getattr(desc, attr)[DirStat.D7])
      self.assertEqual(319833, getattr(desc, attr)[DirStat.Q3])
      self.assertEqual(390507, getattr(desc, attr)[DirStat.D8])
      self.assertEqual(616301, getattr(desc, attr)[DirStat.D9])
      self.assertEqual(29917857, getattr(desc, attr)[DirStat.MAX])
      self.assertEqual(11, getattr(desc, unknown_attr)['something-new'])

      test_entries = (
        'complete=-4',
        'complete:4',
        'complete=4.timeout=3',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        desc = self._expect_invalid_attr(desc_text)
        self.assertEqual(None, getattr(desc, attr))
        self.assertEqual(None, getattr(desc, unknown_attr))

  def test_conn_bi_direct(self):
    """
    Parses the conn-bi-direct line with valid and invalid data.
    """

    desc = get_relay_extrainfo_descriptor({'conn-bi-direct': '2012-05-03 12:07:50 (500 s) 277431,12089,0,2134'})
    self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.conn_bi_direct_end)
    self.assertEqual(500, desc.conn_bi_direct_interval)
    self.assertEqual(277431, desc.conn_bi_direct_below)
    self.assertEqual(12089, desc.conn_bi_direct_read)
    self.assertEqual(0, desc.conn_bi_direct_write)
    self.assertEqual(2134, desc.conn_bi_direct_both)

    test_entries = (
      '',
      '2012-05-03 ',
      '2012-05-03',
      '2012-05-03 12:07:60 (500 s)',
      '2012-05-03 12:07:50 (500s)',
      '2012-05-03 12:07:50 (500 s',
      '2012-05-03 12:07:50 (500 )',
      '2012-05-03 12:07:50 (500 s)11',
      '2012-05-03 12:07:50 (500 s) 277431,12089,0',
      '2012-05-03 12:07:50 (500 s) 277431,12089,0a,2134',
      '2012-05-03 12:07:50 (500 s) -277431,12089,0,2134',
    )

    for entry in test_entries:
      desc_text = get_relay_extrainfo_descriptor({'conn-bi-direct': entry}, content = True)
      desc = self._expect_invalid_attr(desc_text)
      self.assertEqual(None, desc.conn_bi_direct_end)
      self.assertEqual(None, desc.conn_bi_direct_interval)
      self.assertEqual(None, desc.conn_bi_direct_below)
      self.assertEqual(None, desc.conn_bi_direct_read)
      self.assertEqual(None, desc.conn_bi_direct_write)
      self.assertEqual(None, desc.conn_bi_direct_both)

  def test_percentage_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" num%
    """

    for keyword in ('dirreq-v2-share', 'dirreq-v3-share'):
      attr = keyword.replace('-', '_').replace('dirreq', 'dir')

      test_entries = (
        ('0.00%', 0.0),
        ('0.01%', 0.0001),
        ('50%', 0.5),
        ('100.0%', 1.0),
      )

      for test_value, expected_value in test_entries:
        desc = get_relay_extrainfo_descriptor({keyword: test_value})
        self.assertEqual(expected_value, getattr(desc, attr))

      test_entries = (
        (''),
        (' '),
        ('100'),
        ('-5%'),
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, attr)

  def test_number_list_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" num,...,num
    """

    for keyword in ('cell-processed-cells', 'cell-queued-cells', 'cell-time-in-queue'):
      attr = keyword.replace('-', '_')

      test_entries = (
        ('', []),
        (' ', []),
        ('0,0,0', [0.0, 0.0, 0.0]),
        ('2.3,-4.6,8.9,16.12,32.15', [2.3, -4.6, 8.9, 16.12, 32.15]),
      )

      for test_value, expected_value in test_entries:
        desc = get_relay_extrainfo_descriptor({keyword: test_value})
        self.assertEqual(expected_value, getattr(desc, attr))

      # Malformed entries: the parser keeps whatever values it could read.
      test_entries = (
        (',,11', [11.0]),
        ('abc,5.7,def', [5.7]),
        ('blarg', []),
      )

      for entry, expected in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, attr, expected)

  def test_timestamp_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS
    """

    for keyword in ('published', 'geoip-start-time'):
      attr = keyword.replace('-', '_')

      desc = get_relay_extrainfo_descriptor({keyword: '2012-05-03 12:07:50'})
      self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, attr))

      test_entries = (
        '',
        '2012-05-03 12:07:60',
        '2012-05-03 ',
        '2012-05-03',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, attr)

  def test_timestamp_and_interval_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)
    """

    for keyword in ('cell-stats-end', 'entry-stats-end', 'exit-stats-end', 'bridge-stats-end', 'dirreq-stats-end'):
      # e.g. 'dirreq-stats-end' -> 'dir_stats_end' / 'dir_stats_interval'
      end_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
      interval_attr = end_attr[:-4] + '_interval'

      desc = get_relay_extrainfo_descriptor({keyword: '2012-05-03 12:07:50 (500 s)'})
      self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
      self.assertEqual(500, getattr(desc, interval_attr))

      test_entries = (
        '',
        '2012-05-03 ',
        '2012-05-03',
        '2012-05-03 12:07:60 (500 s)',
        '2012-05-03 12:07:50 (500s)',
        '2012-05-03 12:07:50 (500 s',
        '2012-05-03 12:07:50 (500 )',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        desc = self._expect_invalid_attr(desc_text)
        self.assertEqual(None, getattr(desc, end_attr))
        self.assertEqual(None, getattr(desc, interval_attr))

  def test_timestamp_interval_and_value_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...
    """

    for keyword in ('read-history', 'write-history', 'dirreq-read-history', 'dirreq-write-history'):
      base_attr = keyword.replace('-', '_').replace('dirreq', 'dir')
      end_attr = base_attr + '_end'
      interval_attr = base_attr + '_interval'
      values_attr = base_attr + '_values'

      test_entries = (
        ('', []),
        (' ', []),
        (' 50,11,5', [50, 11, 5]),
      )

      for test_values, expected_values in test_entries:
        desc = get_relay_extrainfo_descriptor({keyword: '2012-05-03 12:07:50 (500 s)%s' % test_values})
        self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), getattr(desc, end_attr))
        self.assertEqual(500, getattr(desc, interval_attr))
        self.assertEqual(expected_values, getattr(desc, values_attr))

      test_entries = (
        '',
        '2012-05-03 ',
        '2012-05-03',
        '2012-05-03 12:07:60 (500 s)',
        '2012-05-03 12:07:50 (500s)',
        '2012-05-03 12:07:50 (500 s',
        '2012-05-03 12:07:50 (500 )',
        '2012-05-03 12:07:50 (500 s)11',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        desc = self._expect_invalid_attr(desc_text)
        self.assertEqual(None, getattr(desc, end_attr))
        self.assertEqual(None, getattr(desc, interval_attr))
        self.assertEqual(None, getattr(desc, values_attr))

  def test_port_mapping_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" port=N,port=N,...
    """

    for keyword in ('exit-kibibytes-written', 'exit-kibibytes-read', 'exit-streams-opened'):
      attr = keyword.replace('-', '_')

      test_entries = (
        ('', {}),
        ('443=100,other=111', {443: 100, 'other': 111}),
        ('80=115533759,443=1777,995=690', {80: 115533759, 443: 1777, 995: 690}),
      )

      for test_value, expected_value in test_entries:
        desc = get_relay_extrainfo_descriptor({keyword: test_value})
        self.assertEqual(expected_value, getattr(desc, attr))

      # Invalid: port out of range, negative port/value, and malformed pairs.
      test_entries = (
        '8000000=115533759',
        '-80=115533759',
        '80=-115533759',
        '=115533759',
        '80=',
        '80,115533759',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, attr)

  def test_hidden_service_stats_end(self):
    """
    Exercise the hidserv-stats-end, which should be a simple date.
    """

    desc = get_relay_extrainfo_descriptor({'hidserv-stats-end': '2012-05-03 12:07:50'})
    self.assertEqual(datetime.datetime(2012, 5, 3, 12, 7, 50), desc.hs_stats_end)

    test_entries = (
      '',
      '2012',
      '2012-05',
      '2012-05-03',
      '2012-05-03 12',
      '2012-05-03 12:07',
      '2012-05-03 12:07:-50',
    )

    for entry in test_entries:
      desc_text = get_relay_extrainfo_descriptor({'hidserv-stats-end': entry}, content = True)
      self._expect_invalid_attr(desc_text, 'hs_stats_end')

  def test_hidden_service_stats(self):
    """
    Check the 'hidserv-rend-relayed-cells' and 'hidserv-dir-onions-seen', which
    share the same format.
    """

    attributes = (
      ('hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr'),
      ('hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr'),
    )

    test_entries = (
      '',
      'hello',
      ' key=value',
      '40 key',
      '40 key value',
      '40 key key=value',
    )

    for keyword, stat_attr, extra_attr in attributes:
      # just the numeric stat (no extra attributes)

      desc = get_relay_extrainfo_descriptor({keyword: '345'})
      self.assertEqual(345, getattr(desc, stat_attr))
      self.assertEqual({}, getattr(desc, extra_attr))

      # values can be negative (#15276)

      desc = get_relay_extrainfo_descriptor({keyword: '-345'})
      self.assertEqual(-345, getattr(desc, stat_attr))
      self.assertEqual({}, getattr(desc, extra_attr))

      # with extra attributes

      desc = get_relay_extrainfo_descriptor({keyword: '345 spiffy=true snowmen=neat'})
      self.assertEqual(345, getattr(desc, stat_attr))
      self.assertEqual({'spiffy': 'true', 'snowmen': 'neat'}, getattr(desc, extra_attr))

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, stat_attr)
        self._expect_invalid_attr(desc_text, extra_attr, {})

  def test_locale_mapping_lines(self):
    """
    Uses valid and invalid data to tests lines of the form...
    "<keyword>" CC=N,CC=N,...
    """

    for keyword in ('dirreq-v2-ips', 'dirreq-v3-ips', 'dirreq-v2-reqs', 'dirreq-v3-reqs', 'geoip-client-origins', 'entry-ips', 'bridge-ips'):
      attr = keyword.replace('-', '_').replace('dirreq', 'dir').replace('reqs', 'requests')

      test_entries = (
        ('', {}),
        ('uk=5,de=3,jp=2', {'uk': 5, 'de': 3, 'jp': 2}),
      )

      for test_value, expected_value in test_entries:
        desc = get_relay_extrainfo_descriptor({keyword: test_value})
        self.assertEqual(expected_value, getattr(desc, attr))

      # Invalid: negative count, three-letter code, bad separators.
      test_entries = (
        'uk=-4',
        'uki=4',
        'uk:4',
        'uk=4.de=3',
      )

      for entry in test_entries:
        desc_text = get_relay_extrainfo_descriptor({keyword: entry}, content = True)
        self._expect_invalid_attr(desc_text, attr)

  def test_minimal_bridge_descriptor(self):
    """
    Basic sanity check that we can parse a descriptor with minimal attributes.
    """

    desc = get_bridge_extrainfo_descriptor()
    self.assertEqual('ec2bridgereaac65a3', desc.nickname)
    self.assertEqual('1EC248422B57D9C0BD751892FE787585407479A4', desc.fingerprint)
    self.assertEqual('006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4', desc.digest())
    self.assertEqual([], desc.get_unrecognized_lines())

    # check that we don't have crypto fields
    self.assertRaises(AttributeError, getattr, desc, 'signature')

  def test_bridge_ip_versions_line(self):
    """
    Parses the 'bridge-ip-versions' line, which only appears in bridges.
    """

    desc = get_bridge_extrainfo_descriptor({'bridge-ip-versions': 'v4=16,v6=40'})
    self.assertEqual({'v4': 16, 'v6': 40}, desc.ip_versions)

    desc = get_bridge_extrainfo_descriptor({'bridge-ip-versions': ''})
    self.assertEqual({}, desc.ip_versions)

    desc_text = get_bridge_extrainfo_descriptor({'bridge-ip-versions': 'v4=24.5'}, content = True)
    self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text, True)

  def test_bridge_ip_transports_line(self):
    """
    Parses the 'bridge-ip-transports' line, which only appears in bridges.
    """

    desc = get_bridge_extrainfo_descriptor({'bridge-ip-transports': '<OR>=16,<??>=40'})
    self.assertEqual({'<OR>': 16, '<??>': 40}, desc.ip_transports)

    desc = get_bridge_extrainfo_descriptor({'bridge-ip-transports': ''})
    self.assertEqual({}, desc.ip_transports)

    desc_text = get_bridge_extrainfo_descriptor({'bridge-ip-transports': '<OR>=24.5'}, content = True)
    self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text, True)

  def test_transport_line(self):
    """
    Basic exercise for both a bridge and relay's transport entry.
    """

    desc = get_bridge_extrainfo_descriptor({'transport': 'obfs3'})
    self.assertEqual({'obfs3': (None, None, None)}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())

    desc = get_relay_extrainfo_descriptor({'transport': 'obfs2 83.212.96.201:33570'})
    self.assertEqual({'obfs2': ('83.212.96.201', 33570, [])}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())

    # multiple transport lines
    desc = get_bridge_extrainfo_descriptor({'transport': 'obfs3\ntransport obfs4'})
    self.assertEqual({'obfs3': (None, None, None), 'obfs4': (None, None, None)}, desc.transport)
    self.assertEqual([], desc.get_unrecognized_lines())

  def _expect_invalid_attr(self, desc_text, attr = None, expected_value = None):
    """
    Asserts that construction will fail due to desc_text having a malformed
    attribute. If an attr is provided then we check that it matches an expected
    value when we're constructed without validation.
    """

    self.assertRaises(ValueError, RelayExtraInfoDescriptor, desc_text, True)
    desc = RelayExtraInfoDescriptor(desc_text, validate = False)

    if attr:
      # check that the invalid attribute matches the expected value when
      # constructed without validation
      self.assertEqual(expected_value, getattr(desc, attr))
    else:
      # check a default attribute
      self.assertEqual('ninja', desc.nickname)

    return desc
| lgpl-3.0 |
matsumoto-r/synciga | src/build/ios/PRESUBMIT.py | 68 | 1276 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
"""Chromium presubmit script for src/tools/ios.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
# Repo-relative path of the whitelist this presubmit keeps sorted.
WHITELIST_FILE = 'build/ios/grit_whitelist.txt'


def _CheckWhitelistSorted(input_api, output_api):
  """Verify that the grit whitelist file, if touched, is kept sorted.

  Args:
    input_api: presubmit InputApi; only LocalPaths() is used.
    output_api: presubmit OutputApi used to build errors.

  Returns:
    A list with a single PresubmitError when the file is unsorted,
    otherwise an empty list.
  """
  for path in input_api.LocalPaths():
    if WHITELIST_FILE == path:
      # 'with' guarantees the handle is closed (the original leaked it).
      with open(os.path.join('../..', WHITELIST_FILE)) as whitelist:
        lines = whitelist.readlines()
      # Renamed from 'sorted' to avoid shadowing the builtin.
      is_sorted = all(lines[i] <= lines[i + 1] for i in xrange(len(lines) - 1))
      if not is_sorted:
        return [output_api.PresubmitError(
            'The file ' + WHITELIST_FILE + ' must be sorted.')]
  return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  return list(_CheckWhitelistSorted(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point invoked when a change is uploaded for review."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point invoked when a change is committed."""
  return list(_CommonChecks(input_api, output_api))
| bsd-3-clause |
USGSDenverPychron/pychron | pychron/mv/co2_locator.py | 1 | 1077 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.mv.locator import Locator
class CO2Locator(Locator):
    """Locator subclass for the CO2 laser system.

    No behavior is overridden yet -- presumably this subclass exists as a
    dedicated hook point for CO2-specific detection logic (TODO confirm).
    """
    pass
# ============= EOF =============================================
| apache-2.0 |
cryptobanana/ansible | docs/docsite/rst/dev_guide/style_guide/conf.py | 120 | 11810 | # -*- coding: utf-8 -*-
#
# Ansible Style Guide documentation build configuration file, created by
# sphinx-quickstart on Mon May 11 12:41:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this build (all ship with Sphinx).
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']  # template dirs, relative to this directory

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Ansible Style Guide'
copyright = u'2015, Sandra A Wills'
author = u'Sandra A Wills'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags. Currently identical to
# `version`.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None  # None -> Sphinx's default language

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
# Enabled: todo directives are rendered in the output.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Custom theme -- presumably resolved from the local "_themes" directory
# configured via html_theme_path below (TODO confirm the theme exists there).
html_theme = 'srtd'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = ["_themes"]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
html_title = 'Ansible Style Guide'

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
html_short_title = 'Style Guide'

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AnsibleStyleGuidedoc'  # output basename for the HTML help builder
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX output tweaks are left at their Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',

    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AnsibleStyleGuide.tex', u'Ansible Style Guide Documentation',
     u'Sandra A Wills', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 (user commands) man page generated from the master document.
man_pages = [
    (master_doc, 'ansiblestyleguide', u'Ansible Style Guide Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Single Texinfo document generated from the master toctree.
texinfo_documents = [
    (master_doc, 'AnsibleStyleGuide', u'Ansible Style Guide Documentation',
     author, 'AnsibleStyleGuide', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# Bibliographic Dublin Core info, reused from the project metadata above.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']  # files that should not be packed into the epub
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}  # cross-reference the Python stdlib docs
| gpl-3.0 |
JrGoodle/clowder | clowder/util/formatting.py | 1 | 13335 | """String formatting utilities
.. codeauthor:: Joe DeCapo <joe@polka.cat>
"""
import math
from pathlib import Path
from typing import List, Optional, Tuple, Union
from clowder.util.console import CONSOLE
# Accepted input for the markup helpers below: plain text or a filesystem path.
Output = Union[str, Path]
def bold(output: Output) -> str:
    """Render *output* wrapped in rich bold markup."""
    return '[bold]{}[/bold]'.format(output)


def cyan(output: Output) -> str:
    """Render *output* wrapped in rich cyan markup."""
    return '[cyan]{}[/cyan]'.format(output)


def green(output: Output) -> str:
    """Render *output* wrapped in rich green markup."""
    return '[green]{}[/green]'.format(output)


def red(output: Output) -> str:
    """Render *output* wrapped in rich red markup."""
    return '[red]{}[/red]'.format(output)


def magenta(output: Output) -> str:
    """Render *output* wrapped in rich magenta markup."""
    return '[magenta]{}[/magenta]'.format(output)


def yellow(output: Output) -> str:
    """Render *output* wrapped in rich yellow markup."""
    return '[yellow]{}[/yellow]'.format(output)
def escape(output: Output) -> str:
    """Escape rich markup control sequences in *output*."""
    # Imported lazily so merely importing this module does not require rich.
    from rich.markup import escape as rich_escape
    return rich_escape(output)


def underline(output: Output) -> str:
    """Render *output* wrapped in rich underline markup."""
    return '[underline]{}[/underline]'.format(output)
# TODO: Update to return list of all duplicates found
def check_for_duplicates(list_of_elements: List[str]) -> Optional[str]:
    """Check if given list contains any duplicates

    :param List[str] list_of_elements: List of strings to check for duplicates
    :return: First duplicate encountered, or None if no duplicates found
    """
    seen = set()
    for element in list_of_elements:
        if element in seen:
            return element
        seen.add(element)
    return None
def clowder_command(cmd: str) -> str:
    """Return formatted clowder command name

    :param str cmd: Clowder command name
    :return: Command name rendered in bold markup
    """
    return bold(cmd)


def clowder_name(name: str) -> str:
    """Return formatted clowder name

    :param str name: Clowder name
    :return: Clowder name rendered in bold markup
    """
    return bold(name)
def command(cmd: Union[str, List[str]]) -> str:
    """Return formatted command name

    :param Union[str, List[str]] cmd: Command as a string or argument list
    :return: Bold-rendered command prefixed with '$ '
    """
    if isinstance(cmd, list):
        cmd = " ".join(cmd)
    return bold(f"$ {cmd}")
def invalid_yaml(name: str) -> str:
    """Return error message for invalid yaml file

    :param str name: Invalid file's name
    :return: Formatted yaml error
    """
    yaml_path = path(Path(name))
    return f"{yaml_path} appears to be invalid"
# def error_source_not_found(source: str, yml: Path, project: str, upstream_name: Optional[str] = None) -> str:
# """Return formatted error string for project with unknown source specified
#
# :param str source: Source name
# :param Path yml: Path to yaml file
# :param str project: Project name
# :param Optional[str] upstream_name: Upstream name
# :return: Formatted source not found error
# """
#
# upstream_output = ""
# if upstream_name:
# upstream_output = f" for upstream '{upstream_name}'"
#
# messages = [invalid_yaml(yml.name),
# f"{yaml_path(yml)}",
# f"source '{source}'{upstream_output} specified in project '{project}' not found in 'sources'"]
# return "\n".join(messages)
# FIXME: Only print project name using this where appropriate (now that project status and upstream_string
# are printed back to back)
def upstream(name: str) -> str:
    """Return formatted upstream name

    :param str name: Upstream name
    :return: Upstream name rendered in cyan markup
    """
    return cyan(name)
def options_help_message(options: Tuple[str, ...], message: str) -> str:
    """Help message for groups option

    :param Tuple[str, ...] options: List of options
    :param str message: Help message
    :return: *message* with the comma-separated options appended, or plain
        *message* when the options are missing, empty, or not all strings
    """
    # BUG FIX: the previous guard compared `options` against the *list*
    # [''], which is never equal to a tuple, so a lone empty string slipped
    # through and produced a blank options line. Compare shape-insensitively.
    if not options:
        # Covers None and empty collections alike.
        return message
    if not all(isinstance(option, str) for option in options):
        return message
    if tuple(options) == ('',):
        return message
    help_message = '''
{0}:
  {1}
'''
    return help_message.format(message, ', '.join(options))
def project_options_help_message(message: str) -> str:
    """Help message for projects/groups options

    Renders the known project names, paths, and groups (plus upstream names
    when any exist) in aligned columns beneath *message*.

    :param str message: Help message
    :return: Formatted options help message
    """

    def column_entry(choices, length, line):
        # Entry for row `line`, left-justified to the column width; blank
        # padding once the column runs out of entries.
        if len(choices) > 0 and line < len(choices):
            return choices[line].ljust(length)
        return "".ljust(length)

    def three_column_width(choices, title, spacing=0):
        # Column width: longest of the entries and the column title.
        # BUG FIX: the old code did `options += title`, which extended the
        # list with the title's individual characters, so a title longer
        # than every entry was ignored when sizing the column.
        options = list(choices)
        options.append(title)
        length = len(max(options, key=len))
        return length + spacing

    from clowder.clowder_controller import CLOWDER_CONTROLLER
    project_names = CLOWDER_CONTROLLER.project_names
    upstream_names = CLOWDER_CONTROLLER.upstream_names
    project_paths = CLOWDER_CONTROLLER.project_paths
    project_groups = CLOWDER_CONTROLLER.project_groups

    valid_project_names = _validate_help_options(project_names)
    valid_paths = _validate_help_options(project_paths)
    valid_groups = _validate_help_options(project_groups)
    if not valid_project_names or not valid_paths or not valid_groups:
        return message

    project_names_title = "Project Names"
    project_names_underline = "-------------"
    project_paths_title = "Project Paths"
    project_paths_underline = "-------------"
    project_groups_title = "Project Groups"
    project_groups_underline = "--------------"

    if not upstream_names:
        # No upstreams: render names / paths / groups side by side.
        project_names_column_width = three_column_width(project_names, project_names_title, spacing=2)
        project_paths_column_width = three_column_width(project_paths, project_paths_title, spacing=2)
        project_groups_column_width = three_column_width(project_groups, project_groups_title)
        max_column_lines = max(len(project_names), len(project_paths), len(project_groups))

        message = f'{message}:\n\n'
        message += project_names_title.ljust(project_names_column_width)
        message += project_paths_title.ljust(project_paths_column_width)
        message += project_groups_title.ljust(project_groups_column_width)
        message += "\n"
        message += project_names_underline.ljust(project_names_column_width)
        message += project_paths_underline.ljust(project_paths_column_width)
        message += project_groups_underline.ljust(project_groups_column_width)
        message += "\n"
        column_line = 0
        while column_line < max_column_lines:
            message += column_entry(project_names, project_names_column_width, column_line)
            message += column_entry(project_paths, project_paths_column_width, column_line)
            message += column_entry(project_groups, project_groups_column_width, column_line)
            message += "\n"
            column_line += 1
        return message

    valid_upstream_names = _validate_help_options(upstream_names)
    if not valid_upstream_names:
        return message

    def two_column_width(choices_1, title_1, choices_2, title_2, spacing=0):
        # Shared width for two stacked columns (same BUG FIX as above:
        # append the titles instead of extending with their characters).
        options = list(choices_1)
        options += list(choices_2)
        options.append(title_1)
        options.append(title_2)
        length = len(max(options, key=len))
        return length + spacing

    upstream_names_title = "Upstream Names"
    upstream_names_underline = "--------------"

    names_column_width = two_column_width(project_names, project_names_title,
                                          upstream_names, upstream_names_title, spacing=2)
    paths_groups_width = two_column_width(project_paths, project_paths_title, project_groups, project_groups_title)

    # First table: project names / project paths.
    message = f'{message}:\n\n'
    message += project_names_title.ljust(names_column_width)
    message += project_paths_title.ljust(paths_groups_width)
    message += "\n"
    message += project_names_underline.ljust(names_column_width)
    message += project_paths_underline.ljust(paths_groups_width)
    message += "\n"
    max_column_length = max(len(project_names), len(project_paths))
    column_line = 0
    while column_line < max_column_length:
        message += column_entry(project_names, names_column_width, column_line)
        message += column_entry(project_paths, paths_groups_width, column_line)
        message += "\n"
        column_line += 1
    message += "\n"

    # Second table: upstream names / project groups.
    message += upstream_names_title.ljust(names_column_width)
    message += project_groups_title.ljust(paths_groups_width)
    message += "\n"
    message += upstream_names_underline.ljust(names_column_width)
    message += project_groups_underline.ljust(paths_groups_width)
    message += "\n"
    max_column_length = max(len(upstream_names), len(project_groups))
    column_line = 0
    while column_line < max_column_length:
        message += column_entry(upstream_names, names_column_width, column_line)
        message += column_entry(project_groups, paths_groups_width, column_line)
        message += "\n"
        column_line += 1
    return message
def path(text: Path) -> str:
    """Return formatted path

    :param Path text: Path name
    :return: Resolved (absolute) path rendered in cyan markup
    """
    return cyan(text.resolve())


def ref(text: str) -> str:
    """Return formatted ref name

    :param str text: Git reference
    :return: Git reference rendered in magenta markup
    """
    return magenta(text)


def remote(text: str) -> str:
    """Return formatted remote name

    :param str text: Remote name
    :return: Remote name rendered in yellow markup
    """
    return yellow(text)
def remove_prefix(text: str, prefix: str) -> str:
    """Remove prefix from string

    :param str text: Text to remove prefix from
    :param str prefix: Prefix to remove
    :return: Text with prefix removed if present
    """
    return text[len(prefix):] if text.startswith(prefix) else text
def url_string(url: str) -> str:
    """Return formatted url

    :param str url: URL
    :return: URL rendered in cyan markup
    """
    return cyan(url)
def version_options_help_message(message: str, versions: Tuple[str, ...]) -> str:
    """Help message for projects/groups options

    Lists the available versions beneath *message*, laid out in up to three
    columns depending on the terminal width.

    :param str message: Help message
    :param Tuple[str, ...] versions: Version choices
    :return: Formatted options help message
    """
    if not _validate_help_options(versions):
        return message

    message = f"{message}:\n\n"
    if len(versions) < 10:
        # Few enough entries: a single column is easiest to read.
        for v in versions:
            message += f"{v}\n"
        return message

    terminal_width = CONSOLE.width

    def column_entry(choices, length, line):
        # Entry for row `line`, left-justified to the column width.
        if len(choices) > 0 and line < len(choices):
            return choices[line].ljust(length)
        return "".ljust(length)

    def column_width(choices, spacing=0):
        length = len(max(choices, key=len))
        return length + spacing

    # Determine required widths for 3 column layout
    max_lines = math.ceil(len(versions) / 3)
    first_column_versions = versions[:max_lines]
    second_column_versions = versions[max_lines:2 * max_lines]
    third_column_versions = versions[2 * max_lines:]
    first_column_width = column_width(first_column_versions, spacing=2)
    second_column_width = column_width(second_column_versions, spacing=2)
    third_column_width = column_width(third_column_versions)
    total_width = first_column_width + second_column_width + third_column_width
    if total_width < terminal_width:
        column_line = 0
        while column_line < max_lines:
            message += column_entry(first_column_versions, first_column_width, column_line)
            message += column_entry(second_column_versions, second_column_width, column_line)
            message += column_entry(third_column_versions, third_column_width, column_line)
            message += "\n"
            column_line += 1
        return message

    # If doesn't fit, determine required widths for 2 column layout
    column_length = math.ceil(len(versions) / 2)
    first_column_versions = versions[:column_length]
    second_column_versions = versions[column_length:]
    first_column_width = column_width(first_column_versions, spacing=2)
    second_column_width = column_width(second_column_versions)
    total_width = first_column_width + second_column_width
    if total_width < terminal_width:
        column_line = 0
        # BUG FIX: this loop previously ran to `max_lines` -- the row count
        # of the *three* column layout (ceil(n/3)) -- truncating the two
        # column output and silently dropping versions. Iterate over the
        # actual number of rows in the two column layout (ceil(n/2)).
        while column_line < column_length:
            message += column_entry(first_column_versions, first_column_width, column_line)
            message += column_entry(second_column_versions, second_column_width, column_line)
            message += "\n"
            column_line += 1
        return message

    # Terminal too narrow for any multi-column layout: one version per line.
    for v in versions:
        message += f"{v}\n"
    return message
def version(version_name: str) -> str:
    """Return formatted string for clowder yaml version

    :param str version_name: Clowder version name
    :return: Version name rendered in bold markup
    """
    return bold(version_name)


def project_name(name: str) -> str:
    """Return formatted string for project name

    :param str name: Project name
    :return: Project name rendered in green markup
    """
    return green(name)
def _validate_help_options(options: Optional[Union[str, list, tuple]]) -> bool:
"""Validate help options is valid
:param str options: Possible options
:return: Whether options is valid
"""
if options is None:
return False
if not options:
return False
if options == ['']:
return False
if not all(isinstance(n, str) for n in options):
return False
return True
| mit |
mhaessig/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake_hybi00.py | 466 | 17345 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake.hybi00 module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake.hybi00 import Handshaker
from mod_pywebsocket.handshake.hybi00 import _validate_subprotocol
from test import mock
# Fixture handshake keys and the challenge response the server is expected
# to produce for them.
_TEST_KEY1 = '4 @1 46546xW%0l 1 5'
_TEST_KEY2 = '12998 5 Y3 1 .P00'
_TEST_KEY3 = '^n:ds[4U'
_TEST_CHALLENGE_RESPONSE = '8jKS\'y:G*Co,Wxa-'
# Request fixtures are (port, method, resource, headers[, key3]) tuples
# consumed by _create_request().
_GOOD_REQUEST = (
    80,
    'GET',
    '/demo',
    {
        'Host': 'example.com',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'Sec-WebSocket-Protocol': 'sample',
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)

# Same handshake, but with upper-cased Connection/Upgrade header *values*.
_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES = (
    80,
    'GET',
    '/demo',
    {
        'Host': 'example.com',
        'Connection': 'UPGRADE',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'Sec-WebSocket-Protocol': 'sample',
        'Upgrade': 'WEBSOCKET',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)

# Same handshake, but with mixed-case header *names*.
_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES = (
    80,
    'GET',
    '/demo',
    {
        'hOsT': 'example.com',
        'cOnNeCtIoN': 'Upgrade',
        'sEc-wEbsOcKeT-kEy2': _TEST_KEY2,
        'sEc-wEbsOcKeT-pRoToCoL': 'sample',
        'uPgRaDe': 'WebSocket',
        'sEc-wEbsOcKeT-kEy1': _TEST_KEY1,
        'oRiGiN': 'http://example.com',
    },
    _TEST_KEY3)
# Expected raw handshake response for _GOOD_REQUEST over plain ws://.
_GOOD_RESPONSE_DEFAULT_PORT = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: ws://example.com/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)

# Expected response for the same request over a secure (wss://) connection.
_GOOD_RESPONSE_SECURE = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: wss://example.com/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)

# Valid handshake arriving on a non-default port (8081).
_GOOD_REQUEST_NONDEFAULT_PORT = (
    8081,
    'GET',
    '/demo',
    {
        'Host': 'example.com:8081',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'Sec-WebSocket-Protocol': 'sample',
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)

# Expected response for the non-default-port request: the port appears in
# the Sec-WebSocket-Location header.
_GOOD_RESPONSE_NONDEFAULT_PORT = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: ws://example.com:8081/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)

# Secure (wss://) variant of the non-default-port response.
_GOOD_RESPONSE_SECURE_NONDEF = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: wss://example.com:8081/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)
# Valid handshake that omits the optional Sec-WebSocket-Protocol header.
_GOOD_REQUEST_NO_PROTOCOL = (
    80,
    'GET',
    '/demo',
    {
        'Host': 'example.com',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)

# Matching response: no Sec-WebSocket-Protocol header is echoed back.
_GOOD_RESPONSE_NO_PROTOCOL = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: ws://example.com/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)

# Valid handshake carrying extra, unrelated headers (AKey, EmptyValue).
_GOOD_REQUEST_WITH_OPTIONAL_HEADERS = (
    80,
    'GET',
    '/demo',
    {
        'Host': 'example.com',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'EmptyValue': '',
        'Sec-WebSocket-Protocol': 'sample',
        'AKey': 'AValue',
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)
# TODO(tyoshino): Include \r \n in key3, challenge response.
# Valid handshake whose key3 payload contains non-printable bytes.
_GOOD_REQUEST_WITH_NONPRINTABLE_KEY = (
    80,
    'GET',
    '/demo',
    {
        'Host': 'example.com',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': 'y R2 48 Q1O4 e|BV3 i5 1 u- 65',
        'Sec-WebSocket-Protocol': 'sample',
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': '36 7 74 i 92 2\'m 9 0G',
        'Origin': 'http://example.com',
    },
    ''.join(map(chr, [0x01, 0xd1, 0xdd, 0x3b, 0xd1, 0x56, 0x63, 0xff])))

# Expected response: the trailing challenge response bytes are binary too.
_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: ws://example.com/demo\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    ''.join(map(chr, [0x0b, 0x99, 0xfa, 0x55, 0xbd, 0x01, 0x23, 0x7b,
                      0x45, 0xa2, 0xf1, 0xd0, 0x87, 0x8a, 0xee, 0xeb])))
# Valid handshake whose resource includes a query string.
_GOOD_REQUEST_WITH_QUERY_PART = (
    80,
    'GET',
    '/demo?e=mc2',
    {
        'Host': 'example.com',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key2': _TEST_KEY2,
        'Sec-WebSocket-Protocol': 'sample',
        'Upgrade': 'WebSocket',
        'Sec-WebSocket-Key1': _TEST_KEY1,
        'Origin': 'http://example.com',
    },
    _TEST_KEY3)

# Matching response: the query part is preserved in Sec-WebSocket-Location.
_GOOD_RESPONSE_WITH_QUERY_PART = (
    'HTTP/1.1 101 WebSocket Protocol Handshake\r\n'
    'Upgrade: WebSocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Location: ws://example.com/demo?e=mc2\r\n'
    'Sec-WebSocket-Origin: http://example.com\r\n'
    'Sec-WebSocket-Protocol: sample\r\n'
    '\r\n' +
    _TEST_CHALLENGE_RESPONSE)
# Malformed or non-WebSocket requests; each carries an inline comment naming
# the defect it exercises. Note the first entry (plain HTTP) has no key3.
_BAD_REQUESTS = (
    (  # HTTP request
        80,
        'GET',
        '/demo',
        {
            'Host': 'www.google.com',
            'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
                          ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
                          ' GTB6 GTBA',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,'
                      '*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5',
            'Accept-Encoding': 'gzip,deflate',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
            'Keep-Alive': '300',
            'Connection': 'keep-alive',
        }),
    (  # Wrong method
        80,
        'POST',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'sample',
            'Upgrade': 'WebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Missing Upgrade
        80,
        'GET',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'sample',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Wrong Upgrade
        80,
        'GET',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'sample',
            'Upgrade': 'NonWebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Empty WebSocket-Protocol
        80,
        'GET',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': '',
            'Upgrade': 'WebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Wrong port number format
        80,
        'GET',
        '/demo',
        {
            'Host': 'example.com:0x50',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'sample',
            'Upgrade': 'WebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Header/connection port mismatch
        8080,
        'GET',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'sample',
            'Upgrade': 'WebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
    (  # Illegal WebSocket-Protocol
        80,
        'GET',
        '/demo',
        {
            'Host': 'example.com',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key2': _TEST_KEY2,
            'Sec-WebSocket-Protocol': 'illegal\x09protocol',
            'Upgrade': 'WebSocket',
            'Sec-WebSocket-Key1': _TEST_KEY1,
            'Origin': 'http://example.com',
        },
        _TEST_KEY3),
)
def _create_request(request_def):
    """Build a mock request from a (port, method, uri, headers[, body]) tuple."""
    port, method, uri, headers = request_def[:4]
    # A fifth element, when present, supplies the raw data fed to the connection.
    body = request_def[4] if len(request_def) > 4 else ''
    connection = mock.MockConn(body)
    connection.local_addr = ('0.0.0.0', port)
    return mock.MockRequest(
        method=method,
        uri=uri,
        headers_in=headers,
        connection=connection)
def _create_get_memorized_lines(lines):
"""Creates a function that returns the given string."""
def get_memorized_lines():
return lines
return get_memorized_lines
def _create_requests_with_lines(request_lines_set):
    """Return one good request per entry, each memorizing the given lines."""
    built = []
    for memorized_lines in request_lines_set:
        req = _create_request(_GOOD_REQUEST)
        getter = _create_get_memorized_lines(memorized_lines)
        req.connection.get_memorized_lines = getter
        built.append(req)
    return built
class HyBi00HandshakerTest(unittest.TestCase):
    """Tests for the HyBi-00 (draft-76) opening handshake performed by Handshaker."""

    def test_good_request_default_port(self):
        """A well-formed port-80 request writes the default-port response and
        populates the ws_* attributes on the request."""
        request = _create_request(_GOOD_REQUEST)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
                         request.connection.written_data())
        self.assertEqual('/demo', request.ws_resource)
        self.assertEqual('http://example.com', request.ws_origin)
        self.assertEqual('ws://example.com/demo', request.ws_location)
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_capitalized_header_values(self):
        """Header *values* with unusual capitalization are still accepted."""
        request = _create_request(_GOOD_REQUEST_CAPITALIZED_HEADER_VALUES)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
                         request.connection.written_data())

    def test_good_request_case_mixed_header_names(self):
        """Header *names* are matched case-insensitively."""
        request = _create_request(_GOOD_REQUEST_CASE_MIXED_HEADER_NAMES)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_DEFAULT_PORT,
                         request.connection.written_data())

    def test_good_request_secure_default_port(self):
        """An https request on port 443 yields a wss:// location with no port."""
        request = _create_request(_GOOD_REQUEST)
        request.connection.local_addr = ('0.0.0.0', 443)
        request.is_https_ = True
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_SECURE,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_nondefault_port(self):
        """A non-default port must appear in the response location."""
        request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
        handshaker = Handshaker(request,
                                mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_NONDEFAULT_PORT,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_secure_non_default_port(self):
        """https on a non-default port keeps the port in the wss:// location."""
        request = _create_request(_GOOD_REQUEST_NONDEFAULT_PORT)
        request.is_https_ = True
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_SECURE_NONDEF,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_default_no_protocol(self):
        """Omitting Sec-WebSocket-Protocol is allowed; ws_protocol stays None."""
        request = _create_request(_GOOD_REQUEST_NO_PROTOCOL)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_NO_PROTOCOL,
                         request.connection.written_data())
        self.assertEqual(None, request.ws_protocol)

    def test_good_request_optional_headers(self):
        """Unknown optional headers are preserved on the request untouched."""
        request = _create_request(_GOOD_REQUEST_WITH_OPTIONAL_HEADERS)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual('AValue',
                         request.headers_in['AKey'])
        self.assertEqual('',
                         request.headers_in['EmptyValue'])

    def test_good_request_with_nonprintable_key(self):
        """Sec-WebSocket-Key values with nonprintable characters still work."""
        request = _create_request(_GOOD_REQUEST_WITH_NONPRINTABLE_KEY)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_WITH_NONPRINTABLE_KEY,
                         request.connection.written_data())
        self.assertEqual('sample', request.ws_protocol)

    def test_good_request_with_query_part(self):
        """A query string in the URI is carried through to ws_location."""
        request = _create_request(_GOOD_REQUEST_WITH_QUERY_PART)
        handshaker = Handshaker(request, mock.MockDispatcher())
        handshaker.do_handshake()
        self.assertEqual(_GOOD_RESPONSE_WITH_QUERY_PART,
                         request.connection.written_data())
        self.assertEqual('ws://example.com/demo?e=mc2', request.ws_location)

    def test_bad_requests(self):
        """Every malformed request in _BAD_REQUESTS raises HandshakeException."""
        for request in map(_create_request, _BAD_REQUESTS):
            handshaker = Handshaker(request, mock.MockDispatcher())
            self.assertRaises(HandshakeException, handshaker.do_handshake)
class HyBi00ValidateSubprotocolTest(unittest.TestCase):
    """Tests for _validate_subprotocol: legal names are printable ASCII
    (0x20-0x7e); empty strings, control characters and non-ASCII must raise."""

    def test_validate_subprotocol(self):
        """Accepts printable-ASCII names; rejects empty/control/non-ASCII ones."""
        # should succeed.
        _validate_subprotocol('sample')
        _validate_subprotocol('Sample')
        _validate_subprotocol('sample\x7eprotocol')
        _validate_subprotocol('sample\x20protocol')

        # should fail.
        self.assertRaises(HandshakeException,
                          _validate_subprotocol,
                          '')
        self.assertRaises(HandshakeException,
                          _validate_subprotocol,
                          'sample\x19protocol')
        self.assertRaises(HandshakeException,
                          _validate_subprotocol,
                          'sample\x7fprotocol')
        self.assertRaises(HandshakeException,
                          _validate_subprotocol,
                          # "Japan" in Japanese
                          u'\u65e5\u672c')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/openshift_health_checker/library/check_yum_update.py | 59 | 4245 | #!/usr/bin/python
'''
Ansible module to test whether a yum update or install will succeed,
without actually performing it or running yum.
parameters:
packages: (optional) A list of package names to install or update.
If omitted, all installed RPMs are considered for updates.
'''
import sys
import yum # pylint: disable=import-error
from ansible.module_utils.basic import AnsibleModule
def main():  # pylint: disable=missing-docstring,too-many-branches
    """Dry-run a yum install/update via the yum Python API and report whether
    dependency resolution would succeed, without changing the system.

    Reads the optional 'packages' list from the Ansible module parameters;
    an empty list means "test an update of everything".  Fails the module
    with a human-readable message on any repository or resolution problem.
    """
    module = AnsibleModule(
        argument_spec=dict(
            packages=dict(type='list', default=[])
        ),
        supports_check_mode=True
    )

    def bail(error):  # pylint: disable=missing-docstring
        # Abort the Ansible run with the given message.
        module.fail_json(msg=error)

    yb = yum.YumBase()  # pylint: disable=invalid-name
    yb.conf.disable_excludes = ["all"]  # assume the openshift excluder will be managed, ignore current state

    # determine if the existing yum configuration is valid
    try:
        yb.repos.populateSack(mdtype='metadata', cacheonly=1)
    # for error of type:
    #   1. can't reach the repo URL(s)
    except yum.Errors.NoMoreMirrorsRepoError as e:  # pylint: disable=invalid-name
        bail('Error getting data from at least one yum repository: %s' % e)
    #   2. invalid repo definition
    except yum.Errors.RepoError as e:  # pylint: disable=invalid-name
        bail('Error with yum repository configuration: %s' % e)
    #   3. other/unknown
    #      * just report the problem verbatim
    except:  # pylint: disable=bare-except; # noqa
        bail('Unexpected error with yum repository: %s' % sys.exc_info()[1])

    packages = module.params['packages']
    no_such_pkg = []
    for pkg in packages:
        try:
            # Stage the package in the transaction; nothing is executed yet.
            yb.install(name=pkg)
        except yum.Errors.InstallError as e:  # pylint: disable=invalid-name
            # Package not found in any enabled repo; collected and reported below.
            no_such_pkg.append(pkg)
        except:  # pylint: disable=bare-except; # noqa
            bail('Unexpected error with yum install/update: %s' %
                 sys.exc_info()[1])

    if not packages:
        # no packages requested means test a yum update of everything
        yb.update()
    elif no_such_pkg:
        # wanted specific packages to install but some aren't available
        user_msg = 'Cannot install all of the necessary packages. Unavailable:\n'
        for pkg in no_such_pkg:
            user_msg += '  %s\n' % pkg
        user_msg += 'You may need to enable one or more yum repositories to make this content available.'
        bail(user_msg)

    try:
        # Resolve dependencies for the staged transaction (still a dry run).
        txn_result, txn_msgs = yb.buildTransaction()
    except:  # pylint: disable=bare-except; # noqa
        bail('Unexpected error during dependency resolution for yum update: \n %s' %
             sys.exc_info()[1])

    # find out if there are any errors with the update/install
    if txn_result == 0:  # 'normal exit' meaning there's nothing to install/update
        pass
    elif txn_result == 1:  # error with transaction
        user_msg = 'Could not perform a yum update.\n'
        if len(txn_msgs) > 0:
            user_msg += 'Errors from dependency resolution:\n'
            for msg in txn_msgs:
                user_msg += '  %s\n' % msg
            user_msg += 'You should resolve these issues before proceeding with an install.\n'
            user_msg += 'You may need to remove or downgrade packages or enable/disable yum repositories.'
        bail(user_msg)
    # TODO: it would be nice depending on the problem:
    #   1. dependency for update not found
    #      * construct the dependency tree
    #      * find the installed package(s) that required the missing dep
    #      * determine if any of these packages matter to openshift
    #      * build helpful error output
    #   2. conflicts among packages in available content
    #      * analyze dependency tree and build helpful error output
    #   3. other/unknown
    #      * report the problem verbatim
    #      * add to this list as we come across problems we can clearly diagnose
    elif txn_result == 2:  # everything resolved fine
        pass
    else:
        bail('Unknown error(s) from dependency resolution. Exit Code: %d:\n%s' %
             (txn_result, txn_msgs))

    module.exit_json(changed=False)
if __name__ == '__main__':
    # Standard entry point when the module file is executed directly.
    main()
| apache-2.0 |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/django/contrib/admin/templatetags/admin_modify.py | 129 | 3006 | import json
from django import template
from django.template.context import Context
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
    """
    Creates a list of prepopulated_fields that should render Javascript for
    the prepopulated fields for both the admin form and inlines.
    """
    fields = []
    if 'adminform' in context:
        fields.extend(context['adminform'].prepopulated_fields)
    if 'inline_admin_formsets' in context:
        for formset in context['inline_admin_formsets']:
            for form in formset:
                # Only brand-new inline rows (no original object) get the JS.
                if form.original is None:
                    fields.extend(form.prepopulated_fields)
    fields_json = [{
        "id": "#%s" % field["field"].auto_id,
        "name": field["field"].name,
        "dependency_ids": ["#%s" % dep.auto_id for dep in field["dependencies"]],
        "dependency_list": [dep.name for dep in field["dependencies"]],
        "maxLength": field["field"].field.max_length or 50,
        "allowUnicode": getattr(field["field"].field, "allow_unicode", False),
    } for field in fields]
    context.update({
        'prepopulated_fields': fields,
        'prepopulated_fields_json': json.dumps(fields_json),
    })
    return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
    """
    Displays the row of buttons for delete and save.
    """
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    ctx = Context(context)
    ctx.update({
        # Deleting is only offered outside popups, with permission, on change.
        'show_delete_link': (
            not is_popup and context['has_delete_permission'] and
            change and context.get('show_delete', True)
        ),
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': (
            context['has_add_permission'] and not is_popup and
            (not save_as or context['add'])
        ),
        'show_save_and_continue': (
            not is_popup and context['has_change_permission'] and
            context.get('show_save_and_continue', True)
        ),
        'show_save': context.get('show_save', True),
    })
    return ctx
@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    # One hidden cell always holds the hidden 'id' field.
    total = 1
    for fieldset in inline_admin_form:
        # Every field on every line occupies its own cell.
        for line in fieldset:
            total += sum(1 for _field in line)
    if inline_admin_form.formset.can_delete:
        # One extra cell for the delete checkbox.
        total += 1
    return total
| mit |
OpenSourcePolicyCenter/multi-country | Python/7CountryAlphaV1/demographicswithclasses2.py | 2 | 15883 | import numpy as np
import csv
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
GENS = 90                  # oldest modeled age; ages run 0..GENS
FIRST_DEATH_AGE = 68       # youngest age with nonzero mortality in the data
OLDEST_IMMIGRANTS = 65     # net migration data covers ages below 65 only
FIRST_FERTILITY_YEAR = 23  # youngest fertile age
LAST_FERTILITY_YEAR = 45   # oldest fertile age
FIRST_WORK_YEAR = 21       # first working age
SKILL_GROUPS = 2           # number of skill classes per generation
MAX_YEARS = 300            # furthest number of years the model can simulate
"""
TODO:
#net_migration only goes to age 65?
#Constant migration each year?
#Low skill babies become low skill parents?
#Make some paramater like Maxyears = 300 that is the furthest number of years the model can go
#Look at paper to compare projections past 2050
#Make delta change for each Region, or import it from somewhere
#Make sure all the dimentions of data in here correspond to WorldModel (fertility is different for example)
#Update data readin files format
"""
def Region_information():
    """
    ********************************************************************************************************************
    REGION CLASS DATA TYPES:
    self.name: string
        -name of the region, e.g. "Russia", "EU"
        -Used when generically printing a Region and to gather data from the folder Data_files
    self.index: integer
        -USA = 1, EU = 2, Japan = 3, China = 4, India = 5, Russia = 6, Korea = 7
        -Used also to gather data from outside .csv files, specifically from population.csv and net_migration.csv
    self.initial_population
        -Vector of length GENS+1 for ages 0-GENS+1 that contains the exact number of people in each
        -Comes from population.csv for the specific country
        -Note: population.csv is in thousands of people, while self.initial_population is in exact number of people
    self.net_migration
        -Vector of length 65 for ages 1-65 that has the total number of immigrants per year for each age group
        -Comes from net_migration.csv for the specific country
        -Note: net_migration.csv is in hundreds of people, while self.net_migration is in exact number of people
    self.fertility_rates
        -Has 23 rows for ages 23-45 (when agents give birth) and 51 columns for years 2008-2058
        -Essentially the transpose of the data in self.name_fertility.csv for nonnegative years
    self.mortality_rates
        -Has 23 rows for ages 68-90 and 51 columns for years 2008-2058
        -Essentially the transpose of the data in self.name_mortality.csv
    self.skill_distributions
        -Vector of length 2 that has the percentage of the total population in each skill group
    self.population_timepath: np.array((GENS, number of simulated years, SKILL_GROUPS))
        -91 rows for ages 0-90, has 1 column for year 2008, and depth 2 for skill classes 0-1.
        -Will add more columns for additional years simulated
        -Stores the population size for each generation, year, and skill group
    self.KID_mat: np.array((GENS, number of simulated years, SKILL_GROUPS))
        -91 rows for ages 0-90, has 1 column for year 2008, and depth 2 for skill classes 0-1.
        -Will add more columns for additional years simulated
        -Stores the number of kids each agent of that generation has. This implies the following:
            -Ages 0-22 = 0
            -Ages 45-68 = A constant, because they have kids from their fertile years but aren't having new kids
            -Ages 68-90 = 0
    self.timeendowment: np.array(number of simulated years)
        -Stores the time endowment for each year. This is h(a, i) in the paper
    self.delta: double
        -Technological progress
    REGION CLASS FUNCTIONS:
    readindata():
    newKids(year):
        -Creates a vector of kids of length 91x2 that is added to self.KID_mat for each year
    simulate_demographics(Years):
        -Simulates population changes for "Years" years
        -Kills old people, makes new babies, adds immigration, gets a new time endowment, and adds new kids and generations to their respective matrices
    plot_demographics(year):
        -Plots Population distribution, Fertility, Mortality, and Net Migration for the given year across ages
    plot_population_distribution(years):
        -Takes in a list of years and plots the Population Distribution of those years on the same graph
    plot_total_population(year):
        -Plots the change in total population over time from 2008 to the given year
    get_total_population(year):
        -Returns the changes in total population up until the given year
    get_population_distribution(year, returnclasses = False):
        -Returns the population distribution for a given year. If returnclasses = True it returns the population with the extra dimension for classes
    get_fertility_rate(year):
        -Returns the fertility rate for a given year
    get_mortality_rate(year, returnall = False):
        -Returns the mortality rate for a given year
        -If returnall == True, it returns the whole array of mortality rates
    get_total_netmigration_rate():
        -Returns the net migration data across age
        -As of now it is the same for each year
    get_kids_mat(year, returnall = False):
        -Returns the distribution of kids taken from KID_mat for the given year
        -If returnall == True, it returns the entire KID_mat
    ******************************************************************************************************************
    """
class Region(object):
def __init__(self, name, index):
self.name = name
self.index = index
self.delta = .01
def readindata():
with open('Data_files/Backup/population.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
popdata = []
for age in csv_reader:
for region in csv_reader:
popdata.append(region[self.index])
popdata = np.array(popdata).astype(np.float)*1000
with open('Data_files/Backup/skillclasses.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
skilldata = []
for skill in csv_reader:
for region in csv_reader:
skilldata.append(region[self.index])
skilldata = np.array(skilldata).astype(np.float)
with open("Data_files/Backup/"+(self.name).lower()+"_fertility.csv","r") as csv_file:
csv_reader=csv.reader(csv_file)
fertdata = []
for age in csv_reader:
for year in csv_reader:
fertdata.append(year[1:])
initfertdata = np.zeros((91, 51))
initfertdata[23:46,:] = np.transpose(np.array(fertdata).astype(np.float)[48:,])
fertdata2 = initfertdata
#fertdata = np.transpose(np.array(fertdata).astype(np.float)[48:,])
with open("Data_files/Backup/"+str(self.name).lower()+"_mortality.csv","r") as csv_file:
csv_reader=csv.reader(csv_file)
mortdata = []
for age in csv_reader:
for year in csv_reader:
mortdata.append(year[1:])
initmortdata = np.zeros((91, 51))
initmortdata[68:,:] = np.transpose(np.array(mortdata).astype(np.float))
mortdata2 = initmortdata
mortdata = np.transpose(np.array(mortdata).astype(np.float))
with open('Data_files/Backup/net_migration.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
migdata = []
for age in csv_reader:
for region in csv_reader:
migdata.append(region[self.index])
initmigdata = np.zeros(91)
initmigdata[:65] = np.array(migdata).astype(np.float)
migdata2 = initmigdata*100
migdata = np.array(migdata).astype(np.float)*100
#Takes initial population and migration and give them an 2nd dimention of length 2 for each skill group
popskilldata = np.zeros((GENS+1, SKILL_GROUPS))
migskilldata = np.zeros((OLDEST_IMMIGRANTS, SKILL_GROUPS))
migskilldata2 = np.zeros((GENS+1, SKILL_GROUPS))
for k in range(SKILL_GROUPS):
popskilldata[:,k] = popdata*skilldata[k]
migskilldata[:,k] = migdata*skilldata[k]
migskilldata2[:,k] = migdata2*skilldata[k]
return popskilldata, migskilldata2, fertdata2, mortdata2, skilldata
self.initial_population, self.net_migration, self.fertility_rates, self.mortality_rates, self.skill_distributions = readindata()
self.population_timepath = np.zeros((91,MAX_YEARS+1,2))
self.population_timepath[:,0,:] = self.initial_population
#self.KID_mat = self.newKids(2008).reshape((91, 1, 2))
self.timeendowment = np.ones(1)
def __repr__(self):
return self.name
def newKids(self, year):
if year > 2050:
#We only have fertility rates after the year 2050
year = 2050
#Column that contains the number of kids each fertile age group has (ages 23-45 in this case)
fertilenumkids = np.cumsum(self.fertility_rates[0:FIRST_FERTILITY_YEAR+1, year-2008])
#Combines the number of kids for age groups 0-22, 23-45, 45-65, and 66-90 into one vector of length 91
numkids = np.hstack((np.zeros(23) , fertilenumkids , np.ones((20))*fertilenumkids[-1] , np.zeros(25)))
#Adding a column of numkids for each skill group
kidsvec = np.tile(numkids.reshape(GENS+1, 1), (1, SKILL_GROUPS))
return kidsvec
def simulate_demographics(self, Years):
Transyear = 50#Number of years for which we have data. Anything beyond this will using data for the 50th year
for t in xrange(1, Years):
#If the current year t is less than or equal to the greatest year for which we have data (Transyear), use the current year t as the index i for pulling data
if t <= Transyear:
i = t
#If the current year t is beyond the greatest year for which we have data (Transyear), use the most recent year as the index i for pulling data
elif t > Transyear:
i = Transyear
#For each skill group: Shifts the population one generation over, killing all of the oldest generation, and adds immigrants
population = self.population_timepath[:-1,t-1,:] + self.net_migration[:-1,:]
#Gets the survival probability of each generation/skillgroup and manipulates the dimensions for easy multiplication in the next step
survival_rates = np.transpose(np.tile((1-self.mortality_rates[1:,i]),(2,1)))
#Gets the surviving fraction of the population and stores it in the population timepath for the current year t
self.population_timepath[1:,t,:] = np.multiply(population, survival_rates)
#Gets the number of newborns by taking a dot product of the fertility rates and the population
newborns = np.reshape(np.dot(self.fertility_rates[:,i], self.population_timepath[:,i,:]), (1,1,2))
#Stores the number of newborns in generation 0 for the current year t
self.population_timepath[0,t,:] = newborns
def plot_demographics(self, year):
#IMPORTANT!! PLOTS THE SUM OF THE SKILL CLASSES. THATS' WHAT THE axis=1 NONSENSE IS
num_simulated_years = self.population_timepath.shape[1]
if year - 2008 >= num_simulated_years:
print "\nERROR: WE HAVE ONLY SIMULATED UP TO THE YEAR", num_simulated_years+2008, "AND YOU REQUESTED TO PLOT DATA FOR THE YEAR", year
print"*SEE plot_demog_distribution IN class Region(object)*\n"
time.sleep(10)
return None
year_index = year - 2008
plt.clf()
plt.suptitle(str(self.name+" Data for "+str(year)))
plt.subplot(2, 2, 1)
plt.plot(range(91),self.population_timepath[:,year_index,:].sum(axis=1))
plt.title("Population Distribution")
plt.grid()
plt.subplot(2, 2, 2)
plt.plot(range(23,46),self.get_fertility_rate(year))
plt.xlim(23, 46)
plt.title("Fertility Rates")
plt.grid()
plt.subplot(2, 2, 3)
plt.plot(range(68,91), self.get_mortality_rate(year))
plt.xlim(68, 89)
plt.ylim(0, 1)
plt.title("Mortality Rates")
plt.grid()
plt.subplot(2, 2, 4)
plt.plot(range(65), self.get_total_netmigration_rate())
plt.title("Total Net Migration")
plt.grid()
plt.show()
plt.clf()
def plot_population_distribution(self, years):
years = np.array(years)
for y in range(len(years)):
yeartograph = years[y]
num_simulated_years = self.population_timepath.shape[1]
if yeartograph - 2008 < num_simulated_years:
#IMPORTANT! Plots the sum of the skill classes for each year
plt.plot(range(91), self.population_timepath[:,yeartograph-2008,:].sum(axis=1))
else:
print "\nERROR: WE HAVE ONLY SIMULATED UP TO THE YEAR", num_simulated_years+2008-1, "AND YOU REQUESTED TO PLOT THE YEAR", yeartograph
print"*SEE plot_population_distribution IN class Region(object)*\n"
time.sleep(15)
return None
plt.title(str(self.name + " Population Distribution"))
plt.legend(years)
plt.show()
def plot_total_population(self, year):
totalpopulation = self.population_timepath.sum(axis=0).sum(axis=1)[:year-2008+1]
plt.plot(range(2008, year+1), totalpopulation/1000000)
plt.title(self.name+" Population Change from 2008-"+ str(year))
plt.xlim(2008, year)
plt.xlabel('Year')
plt.ylabel('Population (Millions)')
plt.show()
def get_total_population(self, year):
totalpopulation = self.population_timepath.sum(axis=0).sum(axis=1)[:year-2008+1]
return totalpopulation
def get_population_distribution(self, year, returnclasses = False):
if returnclasses == True:
if year-2008 < self.population_timepath.shape[1]:
return self.population_timepath[:,year-2008,:]
else:
print "We have only calculated up till year", self.population_timepath.shape[0], "so we are returning data for that year"
return self.population_timepath[-1,:]
else: #if returnclasses == False
if year-2008 < self.population_timepath.shape[1]:
return self.population_timepath[:,year-2008,:].sum(axis=1)
else:
print "We have only calculated up till year", self.population_timepath.shape[0], "so we are returning data for that year"
return self.population_timepath[-1,:].sum(axis=1)
def get_fertility_rate(self,year, returnall = False):
if returnall == True:
return self.fertility_rates
if year-2008 < self.fertility_rates.shape[1]:
return self.fertility_rates[:,year-2008]
else:
print "\nThis data is too far out, so we are returning the steady-state value\n"
return self.fertility_rates[:,-1]
def get_mortality_rate(self, year, returnall = False):
if returnall == True:
return self.mortality_rates
if year-2008 < self.mortality_rates.shape[1]:
return self.mortality_rates[:,year-2008]
else:
print "\nThis data is too far out, so we are returning the steady-state value\n"
return self.mortality_rates[:,-1]
def get_total_netmigration_rate(self):
return self.net_migration.sum(axis=1)
def get_kids_mat(self, year, returnall = False):
if returnall == True:
return self.KID_mat
return self.KID_mat[:, year-2008, :]
def compare_countries(countries, year):
    """Plots side-by-side demographic comparisons for the given Region objects.

    First figure: age distribution and total population through `year`.
    Second figure: fertility, mortality and net migration for `year`.
    """
    plt.clf()
    plt.suptitle("Demographics "+str(year))
    plt.subplot(1, 2, 1)
    for region in countries:
        plt.plot(range(91), region.get_population_distribution(year))
    plt.title("Age Distribution")
    plt.xlabel('Age')
    plt.ylabel('Population')
    plt.grid()
    plt.subplot(1, 2, 2)
    for region in countries:
        plt.plot(range(2008, year+1), region.get_total_population(year)/1000000)
    plt.title("Total Population (Millions)")
    plt.xlim(2008, year)
    plt.xlabel('Year')
    plt.legend(countries, loc = "upper left", prop={'size':11})
    plt.grid()
    plt.show()
    plt.clf()
    plt.suptitle(" Data for the Year "+str(year))
    # BUGFIX: the legend previously used the module-level `regionlist`
    # instead of the `countries` argument, mislabeling the curves whenever a
    # subset of regions was passed in.
    plt.legend(countries, loc = "lower right")
    plt.subplot(2, 2, 1)
    for region in countries:
        plt.plot(range(23,46), region.get_fertility_rate(year))
    plt.title("Fertility")
    plt.xlim(23, 46)
    plt.grid()
    plt.subplot(2, 2, 2)
    for region in countries:
        plt.plot(range(68,91), region.get_mortality_rate(year))
    plt.title("Mortality")
    plt.xlim(68, 89)
    plt.ylim(0, 1)
    plt.legend(countries, loc = "upper left", prop={'size':11})
    plt.grid()
    plt.subplot(2, 2, 3)
    for region in countries:
        plt.plot(range(65), region.get_total_netmigration_rate())
    plt.title("Net Migration")
    plt.grid()
    plt.show()
    plt.clf()
USA = Region("USA", 1)
EU = Region("EU", 2)
Japan = Region("Japan", 3)
China = Region("China", 4)
India = Region("India", 5)
Russia = Region("Russia", 6)
Korea = Region("Korea", 7)
regionlist = [USA, EU, Japan, China, India, Russia, Korea]
#for index, region in enumerate(regionlist):
#region.simulate_demographics(300)
#Russia.plot_demographics(2058)
Japan.simulate_demographics(300)
Japan.plot_population_distribution([2008, 2138])
print "Done" | mit |
qPCR4vir/orange | Orange/OrangeWidgets/OWBaseWidget.py | 6 | 45693 | #
# OWWidget.py
# Orange Widget
# A General Orange Widget, from which all the Orange Widgets are derived
#
import os
import sys
import cPickle
import logging
import warnings
import shutil
import time
import random
import user
from Orange.utils import environ
from Orange.orng.orngEnviron import directoryNames as old_directory_names
from PyQt4.QtGui import *
from PyQt4.QtCore import *
# Define pyqtConfigure not available in PyQt4 versions prior to 4.6
if not hasattr(QObject, "pyqtConfigure"):
def pyqtConfigure(obj, **kwargs):
meta = obj.metaObject()
for name, val in kwargs.items():
if meta.indexOfProperty(name) >= 0:
obj.setProperty(name, QVariant(val))
elif meta.indexOfSignal(meta.normalizedSignature(name)) >= 0:
obj.connect(obj, SIGNAL(name), val)
QObject.pyqtConfigure = pyqtConfigure
from OWContexts import *
import orange
from orange import ExampleTable
import Orange.utils
from Orange.utils import debugging as orngDebugging
from string import *
from Orange.OrangeCanvas.registry.description import (
Default, NonDefault, Single, Multiple, Explicit, Dynamic,
InputSignal, OutputSignal
)
from Orange.OrangeCanvas.scheme.widgetsscheme import (
SignalLink, WidgetsSignalManager, SignalWrapper
)
import OWGUI
_log = logging.getLogger(__name__)
ERROR = 0    # severity constant (consumers outside this chunk)
WARNING = 1  # severity constant (consumers outside this chunk)
TRUE = 1     # legacy boolean alias; used e.g. as the default for `modal`
FALSE = 0    # legacy boolean alias
def _deprecation_warning(name):
warnings.warn(
"{0!r} is deprecated. It will be removed in Orange 2.8".format(name),
DeprecationWarning,
stacklevel=2
)
def unisetattr(self, name, value, grandparent):
    """Set attribute `name` (possibly dotted, e.g. "graph.pointSize") on
    `self` to `value`, then fire any registered control callbacks and push
    the new value into the widget's context handlers.

    `grandparent` is the class whose __setattr__ should perform the raw
    assignment when `obj` is one of its instances (avoids recursion through
    the widget's own overridden __setattr__).
    """
    if "." in name:
        # Dotted name: walk down the attribute chain to the owning object.
        names = name.split(".")
        lastname = names.pop()
        obj = reduce(lambda o, n: getattr(o, n, None), names, self)
    else:
        lastname, obj = name, self
    if not obj:
        # Intermediate attribute missing; report and skip the assignment.
        print "unable to set setting ", name, " to value ", value
    else:
        if hasattr(grandparent, "__setattr__") and isinstance(obj, grandparent):
            grandparent.__setattr__(obj, lastname, value)
        else:
            setattr(obj, lastname, value)
#            obj.__dict__[lastname] = value
    # Notify controls (checkboxes, spins...) registered for this attribute.
    controlledAttributes = hasattr(self, "controlledAttributes") and getattr(self, "controlledAttributes", None)
    controlCallback = controlledAttributes and controlledAttributes.get(name, None)
    if controlCallback:
        for callback in controlCallback:
            callback(value)
#        controlCallback(value)
    # controlled things (checkboxes...) never have __attributeControllers
    else:
        if hasattr(self, "__attributeControllers"):
            for controller, myself in self.__attributeControllers.keys():
                # Drop stale controller entries that no longer point at us.
                if getattr(controller, myself, None) != self:
                    del self.__attributeControllers[(controller, myself)]
                    continue
                controlledAttributes = hasattr(controller, "controlledAttributes") and getattr(controller, "controlledAttributes", None)
                if controlledAttributes:
                    fullName = myself + "." + name
                    controlCallback = controlledAttributes.get(fullName, None)
                    if controlCallback:
                        for callback in controlCallback:
                            callback(value)
                    else:
                        # Re-register controllers for any nested controlled
                        # attributes under the newly assigned value.
                        lname = fullName + "."
                        dlen = len(lname)
                        for controlled in controlledAttributes.keys():
                            if controlled[:dlen] == lname:
                                self.setControllers(value, controlled[dlen:], controller, fullName)
                                # no break -- can have a.b.c.d and a.e.f.g; needs to set controller for all!
    # if there are any context handlers, call the fastsave to write the value into the context
    if hasattr(self, "contextHandlers") and hasattr(self, "currentContexts"):
        for contextName, contextHandler in self.contextHandlers.items():
            contextHandler.fastSave(self.currentContexts.get(contextName), self, name, value)
class ControlledAttributesDict(dict):
    """Dict mapping attribute names to *lists* of control callbacks.

    Assigning a value appends it to the key's callback list (creating the
    list on first assignment) and notifies the owning widget via
    master.setControllers so controller bookkeeping stays current.
    """

    def __init__(self, master):
        self.master = master  # the widget owning the controlled attributes

    def __setitem__(self, key, value):
        # BUGFIX: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent and works on both Python 2 and 3.
        if key not in self:
            dict.__setitem__(self, key, [value])
        else:
            dict.__getitem__(self, key).append(value)
        self.master.setControllers(self.master, key, self.master, "")
class AttributeList(list):
    """Plain list subclass; adds no behavior and exists only as a distinct type."""
    pass
class ExampleList(list):
    """Plain list subclass; adds no behavior and exists only as a distinct type."""
    pass
# Module-level counter; incremented in OWBaseWidget.__init__ so each widget
# instance receives a unique self.widgetId.
widgetId = 0

# Settings-dict key reserved for a data version marker.
# NOTE(review): its consumer is outside this chunk -- presumably the
# settings save/load code; confirm before reusing the key.
_SETTINGS_VERSION_KEY = "__settingsDataVersion"
class OWBaseWidget(QDialog):
    def __new__(cls, *arg, **args):
        """Create the widget instance and seed the context/debug attributes
        that must exist before __init__ (and any subclass code) runs.

        Selected private flags may be overridden via keyword arguments.
        """
        self = QDialog.__new__(cls)

        self.currentContexts = {}   # the "currentContexts" MUST be the first thing assigned to a widget
        self._useContexts = 1       # do you want to use contexts
        self._owInfo = 1            # currently disabled !!!
        self._owWarning = 1         # do we want to see warnings
        self._owError = 1           # do we want to see errors
        self._owShowStatus = 0      # do we want to see warnings and errors in status bar area of the widget
        self._guiElements = []      # used for automatic widget debugging
        for key in args:
            if key in ["_owInfo", "_owWarning", "_owError", "_owShowStatus", "_useContexts", "_category", "_settingsFromSchema"]:
                self.__dict__[key] = args[key]        # we cannot use __dict__.update(args) since we can have many other
        return self
def __init__(self, parent=None, signalManager=None, title="Orange BaseWidget", modal=FALSE, savePosition=False, resizingEnabled=1, **args):
if resizingEnabled:
QDialog.__init__(self, parent, Qt.Dialog)
else:
QDialog.__init__(self, parent, Qt.Dialog |
Qt.MSWindowsFixedSizeDialogHint)
# do we want to save widget position and restore it on next load
self.savePosition = savePosition
if savePosition:
self.settingsList = getattr(self, "settingsList", []) + ["widgetShown", "savedWidgetGeometry"]
self.setCaption(title)
self.setFocusPolicy(Qt.StrongFocus)
# XXX: Shadows a base class method. Find all uses where 'parent' is
# being accessed with an instance member lookup and fix them.
self.parent = parent
self.needProcessing = 0 # used by the old (pre v2.7) signalManager
self.signalManager = signalManager
self.inputs = [] # signalName:(dataType, handler, onlySingleConnection)
self.outputs = [] # signalName: dataType
self.wrappers = [] # stored wrappers for widget events
self.linksIn = {} # signalName : (dirty, widgetFrom, handler, signalData)
self.linksOut = {} # signalName: (signalData, id)
self.connections = {} # dictionary where keys are (control, signal) and values are wrapper instances. Used in connect/disconnect
self.controlledAttributes = ControlledAttributesDict(self)
self.progressBarHandler = None # handler for progress bar events
self.processingHandler = None # handler for processing events
self.eventHandler = None
self.callbackDeposit = []
self.startTime = time.time() # used in progressbar
self.widgetStateHandler = None
self.widgetState = {"Info":{}, "Warning":{}, "Error":{}}
if hasattr(self, "contextHandlers"):
for contextHandler in self.contextHandlers.values():
contextHandler.initLocalContext(self)
global widgetId
widgetId += 1
self.widgetId = widgetId
self.asyncCalls = []
self.asyncBlock = False
self.__wasShown = False
self.__progressBarValue = -1
self.__progressState = 0
self.__statusMessage = ""
@property
def widgetDir(self):
# This seems to be the only use of the orngEnviron.directoryNames
# usage (used in various ploting widget to access icons/Dlg_* png)
warnings.warn(
"widgetDir is deprecated. " +
"Use Orange.utils.environ.widget_install_dir",
DeprecationWarning)
return environ.widget_install_dir
def setWidgetIcon(self, iconName):
warnings.warn(
"setWidgetIcon is deprecated and will be removed in the future. "
"Use setWindowIcon instead.",
DeprecationWarning
)
def getIconNames(iconName):
names = []
name, ext = os.path.splitext(iconName)
for num in [16, 32, 42, 60]:
names.append("%s_%d%s" % (name, num, ext))
fullPaths = []
for paths in [(self.widgetDir, name), (self.widgetDir, "icons", name), (os.path.dirname(sys.modules[self.__module__].__file__), "icons", name)]:
for name in names + [iconName]:
fname = os.path.join(*paths)
if os.path.exists(fname):
fullPaths.append(fname)
if fullPaths != []:
break
if len(fullPaths) > 1 and fullPaths[-1].endswith(iconName):
fullPaths.pop() # if we have the new icons we can remove the default icon
return fullPaths
if isinstance(iconName, list):
iconNames = iconName
else:
iconNames = getIconNames(iconName)
icon = QIcon()
for name in iconNames:
pix = QPixmap(name)
icon.addPixmap(pix)
self.setWindowIcon(icon)
def createAttributeIconDict(self):
return OWGUI.getAttributeIcons()
def isDataWithClass(self, data, wantedVarType = None, checkMissing=False):
self.error([1234, 1235, 1236])
if not data:
return 0
if not data.domain.classVar:
self.error(1234, "A data set with a class attribute is required.")
return 0
if wantedVarType and data.domain.classVar.varType != wantedVarType:
self.error(1235, "Unable to handle %s class." % (data.domain.classVar.varType == orange.VarTypes.Discrete and "discrete" or "continuous"))
return 0
if checkMissing and not orange.Preprocessor_dropMissingClasses(data):
self.error(1236, "Unable to handle data set with no known classes")
return 0
return 1
def restoreWidgetPosition(self):
"""
Restore the widget's position from the saved settings.
This is called from the widget's :func:`showEvent`
"""
if self.savePosition:
geometry = getattr(self, "savedWidgetGeometry", None)
restored = False
if geometry is not None:
restored = self.restoreGeometry(QByteArray(geometry))
if restored:
space = qApp.desktop().availableGeometry(self)
frame, geometry = self.frameGeometry(), self.geometry()
# Fix the widget size to fit inside the available space
width = min(space.width() - (frame.width() - geometry.width()), geometry.width())
height = min(space.height() - (frame.height() - geometry.height()), geometry.height())
self.resize(width, height)
# Move the widget to the center of available space if it
# is currently outside it
if not space.contains(self.frameGeometry()):
x = max(0, space.width() / 2 - width / 2)
y = max(0, space.height() / 2 - height / 2)
self.move(x, y)
def restoreWidgetStatus(self):
_deprecation_warning("restoreWidgetStatus")
if self.savePosition and getattr(self, "widgetShown", None):
self.show()
    def resizeEvent(self, ev):
        """Qt resize handler; additionally records the geometry so the
        widget can be restored at the same size next session."""
        QDialog.resizeEvent(self, ev)
        # Don't store geometry if the widget is not visible
        # (the widget receives the resizeEvent before showEvent and we must not
        # overwrite the savedGeometry before then)
        if self.savePosition and self.isVisible():
            self.savedWidgetGeometry = str(self.saveGeometry())
def showEvent(self, ev):
QDialog.showEvent(self, ev)
if self.savePosition:
self.widgetShown = 1
if not self.__wasShown:
self.__wasShown = True
self.restoreWidgetPosition()
def hideEvent(self, ev):
if self.savePosition:
self.widgetShown = 0
self.savedWidgetGeometry = str(self.saveGeometry())
QDialog.hideEvent(self, ev)
def closeEvent(self, ev):
if self.savePosition and self.isVisible() and self.__wasShown:
# self.geometry() is 'invalid' (not yet resized/layout) until the
# widget is made explicitly visible or it might be invalid if
# widget was hidden (in this case hideEvent already saved a valid
# geometry).
self.savedWidgetGeometry = str(self.saveGeometry())
QDialog.closeEvent(self, ev)
def wheelEvent(self, event):
""" Silently accept the wheel event. This is to ensure combo boxes
and other controls that have focus don't receive this event unless
the cursor is over them.
"""
event.accept()
def setCaption(self, caption):
if self.parent != None and isinstance(self.parent, QTabWidget):
self.parent.setTabText(self.parent.indexOf(self), caption)
else:
# we have to save caption title in case progressbar will change it
self.captionTitle = unicode(caption)
self.setWindowTitle(caption)
# put this widget on top of all windows
def reshow(self):
self.show()
self.raise_()
self.activateWindow()
def send(self, signalName, value, id = None):
if self.linksOut.has_key(signalName):
self.linksOut[signalName][id] = value
else:
self.linksOut[signalName] = {id:value}
if self.signalManager is not None:
self.signalManager.send(self, signalName, value, id)
def getdeepattr(self, attr, **argkw):
try:
return reduce(lambda o, n: getattr(o, n, None), attr.split("."), self)
except:
if argkw.has_key("default"):
return argkw[default]
else:
raise AttributeError, "'%s' has no attribute '%s'" % (self, attr)
def setSettings(self, settings):
"""
Set/restore the widget settings.
:param dict settings: A settings dictionary.
"""
if settings.get(_SETTINGS_VERSION_KEY, None) == \
getattr(self, "settingsDataVersion", None):
if _SETTINGS_VERSION_KEY in settings:
settings = settings.copy()
del settings[_SETTINGS_VERSION_KEY]
for key in settings:
self.__setattr__(key, settings[key])
def getSettings(self, alsoContexts=True, globalContexts=False):
"""
Return a dictionary with all settings for serialization.
"""
settings = {}
if hasattr(self, "settingsList"):
for name in self.settingsList:
try:
settings[name] = self.getdeepattr(name)
except Exception:
#print "Attribute %s not found in %s widget. Remove it from the settings list." % (name, self.captionTitle)
pass
settings[_SETTINGS_VERSION_KEY] = getattr(self, "settingsDataVersion", None)
if alsoContexts:
self.synchronizeContexts()
contextHandlers = getattr(self, "contextHandlers", {})
for contextHandler in contextHandlers.values():
contextHandler.mergeBack(self)
# settings[contextHandler.localContextName] = contextHandler.globalContexts
# Instead of the above line, I found this; as far as I recall this was a fix
# for some bugs related to, say, Select Attributes not handling the context
# attributes properly, but I dare not add it without understanding what it does.
# Here it is, if these contexts give us any further trouble.
if (contextHandler.syncWithGlobal and contextHandler.globalContexts is getattr(self, contextHandler.localContextName)) or globalContexts:
settings[contextHandler.localContextName] = contextHandler.globalContexts
else:
contexts = getattr(self, contextHandler.localContextName, None)
if contexts:
settings[contextHandler.localContextName] = contexts
###
settings[contextHandler.localContextName+"Version"] = (contextStructureVersion, contextHandler.contextDataVersion)
return settings
def getDefaultSettingsFilename(self):
"""
Return a default widget settings filename.
"""
settings_dir = environ.widget_settings_dir
class_ = type(self)
version = getattr(class_, "settingsDataVersion", None)
if version is not None:
version = ".".join(str(subv) for subv in version)
basename = "%s.%s.%s.pck" % (class_.__module__, class_.__name__,
version)
else:
basename = "%s.%s.pck" % (class_.__module__, class_.__name__)
filename = os.path.join(settings_dir, basename)
if os.path.exists(filename):
return filename
# Try to find the old filename format ({caption}.ini) and
# copy it to the new place
fs_encoding = sys.getfilesystemencoding()
basename = self.captionTitle + ".ini"
legacy_filename = os.path.join(
settings_dir, # is assumed to be a str in FS encoding
basename.encode(fs_encoding))
if os.path.isfile(legacy_filename):
# Copy the old settings file to the new place.
shutil.copy(legacy_filename, filename)
return filename
def getSettingsFile(self, file):
if file is None:
file = self.getDefaultSettingsFilename()
if not os.path.exists(file):
try:
f = open(file, "wb")
cPickle.dump({}, f)
f.close()
except IOError:
return
if isinstance(file, basestring):
if os.path.exists(file):
return open(file, "r")
else:
return file
# Loads settings from the widget's settings file.
def loadSettings(self, file=None):
file = self.getSettingsFile(file)
if file:
try:
settings = cPickle.load(file)
except Exception, ex:
print >> sys.stderr, "Failed to load settings!", repr(ex)
settings = {}
if hasattr(self, "_settingsFromSchema"):
settings.update(self._settingsFromSchema)
# can't close everything into one big try-except since this would mask all errors in the below code
if settings:
if hasattr(self, "settingsList"):
self.setSettings(settings)
contextHandlers = getattr(self, "contextHandlers", {})
for contextHandler in contextHandlers.values():
localName = contextHandler.localContextName
structureVersion, dataVersion = settings.get(localName+"Version", (0, 0))
if (structureVersion < contextStructureVersion or dataVersion < contextHandler.contextDataVersion) \
and settings.has_key(localName):
del settings[localName]
delattr(self, localName)
contextHandler.initLocalContext(self)
if not hasattr(self, "_settingsFromSchema"): #When running stand alone widgets
if contextHandler.syncWithGlobal:
contexts = settings.get(localName, None)
if contexts is not None:
contextHandler.globalContexts = contexts
else:
setattr(self, localName, contextHandler.globalContexts)
def saveSettings(self, file=None):
settings = self.getSettings(globalContexts=True)
if settings:
if file is None:
file = self.getDefaultSettingsFilename()
if isinstance(file, basestring):
file = open(file, "w")
cPickle.dump(settings, file)
# Loads settings from string str which is compatible with cPickle
def loadSettingsStr(self, str):
if str == None or str == "":
return
settings = cPickle.loads(str)
self.setSettings(settings)
contextHandlers = getattr(self, "contextHandlers", {})
for contextHandler in contextHandlers.values():
localName = contextHandler.localContextName
if settings.has_key(localName):
structureVersion, dataVersion = settings.get(localName+"Version", (0, 0))
if structureVersion < contextStructureVersion or dataVersion < contextHandler.contextDataVersion:
del settings[localName]
delattr(self, localName)
contextHandler.initLocalContext(self)
else:
setattr(self, localName, settings[localName])
# return settings in string format compatible with cPickle
def saveSettingsStr(self):
settings = self.getSettings()
return cPickle.dumps(settings)
def onDeleteWidget(self):
"""
Called when the widget is deleted from a scheme by the user.
Subclasses can override this and cleanup any resources they have.
"""
pass
# this function is only intended for derived classes to send appropriate
# signals when all settings are loaded.
# NOTE: This is useless, this does not get called by the base widget at
# any time. The subclasses are expected to call this themselves. It only
# remains if one tries to call the base implementation.
def activateLoadedSettings(self):
pass
def handleNewSignals(self):
# this is called after all new signals have been handled
# implement this in your widget if you want to process something only after you received multiple signals
pass
# ########################################################################
def connect(self, control, signal, method, type=Qt.AutoConnection):
wrapper = SignalWrapper(self, method)
self.connections[(control, signal)] = wrapper # save for possible disconnect
self.wrappers.append(wrapper)
QDialog.connect(control, signal, wrapper, type)
#QWidget.connect(control, signal, method) # ordinary connection useful for dialogs and windows that don't send signals to other widgets
def disconnect(self, control, signal, method=None):
wrapper = self.connections[(control, signal)]
QDialog.disconnect(control, signal, wrapper)
#===============================================================
# The following methods are used only by the old signal manager
# (orngSignalManager) and possibly the 'Saved Applications' from
# the old Canvas.
# ==============================================================
# does widget have a signal with name in inputs
def hasInputName(self, name):
_deprecation_warning("hasInputName")
for input in self.inputs:
if name == input[0]: return 1
return 0
# does widget have a signal with name in outputs
def hasOutputName(self, name):
_deprecation_warning("hasOutputName")
for output in self.outputs:
if name == output[0]: return 1
return 0
def getInputType(self, signalName):
_deprecation_warning("getInputType")
for input in self.inputs:
if input[0] == signalName: return input[1]
return None
def getOutputType(self, signalName):
_deprecation_warning("getOutputType")
for output in self.outputs:
if output[0] == signalName: return output[1]
return None
def signalIsOnlySingleConnection(self, signalName):
_deprecation_warning("signalIsOnlySingleConnection")
for i in self.inputs:
input = InputSignal(*i)
if input.name == signalName: return input.single
def addInputConnection(self, widgetFrom, signalName):
_deprecation_warning("addInputConnection")
for i in range(len(self.inputs)):
if self.inputs[i][0] == signalName:
handler = self.inputs[i][2]
break
existing = []
if self.linksIn.has_key(signalName):
existing = self.linksIn[signalName]
for (dirty, widget, handler, data) in existing:
if widget == widgetFrom: return # no need to add new tuple, since one from the same widget already exists
self.linksIn[signalName] = existing + [(0, widgetFrom, handler, [])] # (dirty, handler, signalData)
#if not self.linksIn.has_key(signalName): self.linksIn[signalName] = [(0, widgetFrom, handler, [])] # (dirty, handler, signalData)
# delete a link from widgetFrom and this widget with name signalName
def removeInputConnection(self, widgetFrom, signalName):
_deprecation_warning("removeInputConnection")
if self.linksIn.has_key(signalName):
links = self.linksIn[signalName]
for i in range(len(self.linksIn[signalName])):
if widgetFrom == self.linksIn[signalName][i][1]:
self.linksIn[signalName].remove(self.linksIn[signalName][i])
if self.linksIn[signalName] == []: # if key is empty, delete key value
del self.linksIn[signalName]
return
# return widget, that is already connected to this singlelink signal. If this widget exists, the connection will be deleted (since this is only single connection link)
def removeExistingSingleLink(self, signal):
_deprecation_warning("removeExistingSingleLink")
for i in self.inputs:
input = InputSignal(*i)
if input.name == signal and not input.single: return None
for signalName in self.linksIn.keys():
if signalName == signal:
widget = self.linksIn[signalName][0][1]
del self.linksIn[signalName]
return widget
return None
# signal manager calls this function when all input signals have updated the data
def processSignals(self):
_deprecation_warning("processSignals")
if self.processingHandler:
self.processingHandler(self, 1) # focus on active widget
newSignal = 0 # did we get any new signals
# we define only a way to handle signals that have defined a handler function
for signal in self.inputs: # we go from the first to the last defined input
key = signal[0]
if self.linksIn.has_key(key):
for i in range(len(self.linksIn[key])):
(dirty, widgetFrom, handler, signalData) = self.linksIn[key][i]
if not (handler and dirty): continue
newSignal = 1
qApp.setOverrideCursor(Qt.WaitCursor)
try:
for (value, id, nameFrom) in signalData:
if self.signalIsOnlySingleConnection(key):
self.printEvent("ProcessSignals: Calling %s with %s" % (handler, value), eventVerbosity = 2)
handler(value)
else:
self.printEvent("ProcessSignals: Calling %s with %s (%s, %s)" % (handler, value, nameFrom, id), eventVerbosity = 2)
handler(value, (widgetFrom, nameFrom, id))
except:
type, val, traceback = sys.exc_info()
sys.excepthook(type, val, traceback) # we pretend that we handled the exception, so that we don't crash other widgets
qApp.restoreOverrideCursor()
self.linksIn[key][i] = (0, widgetFrom, handler, []) # clear the dirty flag
if newSignal == 1:
self.handleNewSignals()
while self.isBlocking():
self.thread().msleep(50)
qApp.processEvents()
if self.processingHandler:
self.processingHandler(self, 0) # remove focus from this widget
self.needProcessing = 0
# set new data from widget widgetFrom for a signal with name signalName
def updateNewSignalData(self, widgetFrom, signalName, value, id, signalNameFrom):
_deprecation_warning("updateNewSignalData")
if not self.linksIn.has_key(signalName): return
for i in range(len(self.linksIn[signalName])):
(dirty, widget, handler, signalData) = self.linksIn[signalName][i]
if widget == widgetFrom:
if self.linksIn[signalName][i][3] == []:
self.linksIn[signalName][i] = (1, widget, handler, [(value, id, signalNameFrom)])
else:
found = 0
for j in range(len(self.linksIn[signalName][i][3])):
(val, ID, nameFrom) = self.linksIn[signalName][i][3][j]
if ID == id and nameFrom == signalNameFrom:
self.linksIn[signalName][i][3][j] = (value, id, signalNameFrom)
found = 1
if not found:
self.linksIn[signalName][i] = (1, widget, handler, self.linksIn[signalName][i][3] + [(value, id, signalNameFrom)])
self.needProcessing = 1
# ############################################
# PROGRESS BAR FUNCTIONS
#: Progress bar value has changed
progressBarValueChanged = pyqtSignal(float)
#: Processing state has changed
processingStateChanged = pyqtSignal(int)
def progressBarInit(self):
"""
Initialize the widget's progress bar (i.e show and set progress to 0%)
"""
self.startTime = time.time()
self.setWindowTitle(self.captionTitle + " (0% complete)")
if self.progressBarHandler:
self.progressBarHandler(self, 0)
if self.__progressState != 1:
self.__progressState = 1
self.processingStateChanged.emit(1)
self.progressBarValue = 0
def progressBarSet(self, value, processEventsFlags=QEventLoop.AllEvents):
"""
Set the current progress bar to `value`.
.. note::
This method will also call `qApp.processEvents` with the
`processEventsFlags` unless the processEventsFlags equals
``None``.
"""
old = self.__progressBarValue
self.__progressBarValue = value
if value > 0:
if self.__progressState != 1:
warnings.warn("progressBarSet() called without a "
"preceding progressBarInit()",
stacklevel=2)
self.__progressState = 1
self.processingStateChanged.emit(1)
usedTime = max(1, time.time() - self.startTime)
totalTime = (100.0 * usedTime) / float(value)
remainingTime = max(0, totalTime - usedTime)
h = int(remainingTime / 3600)
min = int((remainingTime - h * 3600) / 60)
sec = int(remainingTime - h * 3600 - min * 60)
if h > 0:
text = "%(h)d:%(min)02d:%(sec)02d" % vars()
else:
text = "%(min)d:%(sec)02d" % vars()
self.setWindowTitle(self.captionTitle + " (%(value).2f%% complete, remaining time: %(text)s)" % vars())
else:
self.setWindowTitle(self.captionTitle + " (0% complete)")
if self.progressBarHandler:
self.progressBarHandler(self, value)
if old != value:
self.progressBarValueChanged.emit(value)
if processEventsFlags is not None:
qApp.processEvents(processEventsFlags)
def progressBarValue(self):
"""
Current progress bar value (-1 if the progress bar is not initialized).
"""
return self.__progressBarValue if self.__progressState == 1 else -1.0
progressBarValue = pyqtProperty(
float,
fset=lambda self, val:
OWBaseWidget.progressBarSet(self, val, processEventsFlags=None),
fget=progressBarValue
)
processingState = pyqtProperty(int, fget=lambda self: self.__progressState)
def progressBarAdvance(self, value, processEventsFlags=QEventLoop.AllEvents):
self.progressBarSet(self.progressBarValue + value, processEventsFlags)
def progressBarFinished(self):
"""
Reset and hide the progress bar.
"""
self.setWindowTitle(self.captionTitle)
if self.progressBarHandler:
self.progressBarHandler(self, 101)
if self.__progressState != 0:
self.__progressState = 0
self.processingStateChanged.emit(0)
#: Widget's status message has changed.
statusMessageChanged = pyqtSignal(unicode)
def setStatusMessage(self, text):
if self.__statusMessage != text:
self.__statusMessage = text
self.statusMessageChanged.emit(text)
def statusMessage(self):
return self.__statusMessage
# handler must be a function, that receives 2 arguments.
# First is the widget instance, the second is the value between
# -1 and 101
def setProgressBarHandler(self, handler):
_deprecation_warning("setProgressBarHandler")
self.progressBarHandler = handler
def setProcessingHandler(self, handler):
_deprecation_warning("setProcessingHandler")
self.processingHandler = handler
def setEventHandler(self, handler):
_deprecation_warning("setEventHandler")
self.eventHandler = handler
def setWidgetStateHandler(self, handler):
_deprecation_warning("setWidgetStateHandler")
self.widgetStateHandler = handler
# if we are in debug mode print the event into the file
def printEvent(self, text, eventVerbosity=1):
_deprecation_warning("printEvent")
text = self.captionTitle + ": " + text
if eventVerbosity > 0:
_log.debug(text)
else:
_log.info(text)
if self.eventHandler:
self.eventHandler(text, eventVerbosity)
def openWidgetHelp(self):
_deprecation_warning("openWidgetHelp")
if "widgetInfo" in self.__dict__ and hasattr(qApp, "canvasDlg"):
# This widget is on a canvas.
qApp.canvasDlg.helpWindow.showHelpFor(self.widgetInfo, True)
def keyPressEvent(self, e):
if e.key() in (Qt.Key_Help, Qt.Key_F1):
if "widgetInfo" in self.__dict__ and hasattr(qApp, "canvasDlg"):
self.openWidgetHelp()
elif (int(e.modifiers()), e.key()) in OWBaseWidget.defaultKeyActions:
OWBaseWidget.defaultKeyActions[int(e.modifiers()), e.key()](self)
else:
QDialog.keyPressEvent(self, e)
def information(self, id=0, text=None):
self.setState("Info", id, text)
def warning(self, id=0, text=""):
self.setState("Warning", id, text)
def error(self, id=0, text=""):
self.setState("Error", id, text)
def setState(self, stateType, id, text):
changed = 0
if type(id) == list:
for val in id:
if self.widgetState[stateType].has_key(val):
self.widgetState[stateType].pop(val)
changed = 1
else:
if isinstance(id, basestring):
# if we call information(), warning(), or error() function
# with only one parameter - a string - then set id = 0
text = id
id = 0
if not text:
if self.widgetState[stateType].has_key(id):
self.widgetState[stateType].pop(id)
changed = 1
else:
self.widgetState[stateType][id] = text
changed = 1
if changed:
if self.widgetStateHandler:
self.widgetStateHandler()
elif text:
_log.info(stateType + " - " + text)
if type(id) == list:
for i in id:
self.emit(SIGNAL("widgetStateChanged(QString, int, QString)"),
QString(stateType), i,QString(""))
else:
self.emit(SIGNAL("widgetStateChanged(QString, int, QString)"),
QString(stateType), id, QString(text or ""))
return changed
widgetStateChanged = pyqtSignal(QString, int, QString)
"""Widget state has changed first arg is the state type
('Info', 'Warning' or 'Error') the second is the message id
and finally the message string."""
def widgetStateToHtml(self, info=True, warning=True, error=True):
pixmaps = self.getWidgetStateIcons()
items = []
iconPath = {"Info": "canvasIcons:information.png",
"Warning": "canvasIcons:warning.png",
"Error": "canvasIcons:error.png"}
for show, what in [(info, "Info"), (warning, "Warning"),(error, "Error")]:
if show and self.widgetState[what]:
items.append('<img src="%s" style="float: left;"> %s' % (iconPath[what], "\n".join(self.widgetState[what].values())))
return "<br>".join(items)
@classmethod
def getWidgetStateIcons(cls):
if not hasattr(cls, "_cached__widget_state_icons"):
iconsDir = os.path.join(environ.canvas_install_dir, "icons")
QDir.addSearchPath("canvasIcons",os.path.join(environ.canvas_install_dir,
"icons/"))
info = QPixmap("canvasIcons:information.png")
warning = QPixmap("canvasIcons:warning.png")
error = QPixmap("canvasIcons:error.png")
cls._cached__widget_state_icons = \
{"Info": info, "Warning": warning, "Error": error}
return cls._cached__widget_state_icons
def synchronizeContexts(self):
if hasattr(self, "contextHandlers"):
for contextName, handler in self.contextHandlers.items():
context = self.currentContexts.get(contextName, None)
if context:
handler.settingsFromWidget(self, context)
def openContext(self, contextName="", *arg):
if not self._useContexts:
return
handler = self.contextHandlers[contextName]
context = handler.openContext(self, *arg)
if context:
self.currentContexts[contextName] = context
def closeContext(self, contextName=""):
if not self._useContexts:
return
curcontext = self.currentContexts.get(contextName)
if curcontext:
self.contextHandlers[contextName].closeContext(self, curcontext)
del self.currentContexts[contextName]
def settingsToWidgetCallback(self, handler, context):
pass
def settingsFromWidgetCallback(self, handler, context):
pass
def setControllers(self, obj, controlledName, controller, prefix):
while obj:
if prefix:
# print "SET CONTROLLERS: %s %s + %s" % (obj.__class__.__name__, prefix, controlledName)
if obj.__dict__.has_key("attributeController"):
obj.__dict__["__attributeControllers"][(controller, prefix)] = True
else:
obj.__dict__["__attributeControllers"] = {(controller, prefix): True}
parts = controlledName.split(".", 1)
if len(parts) < 2:
break
obj = getattr(obj, parts[0], None)
prefix += parts[0]
controlledName = parts[1]
    def __setattr__(self, name, value):
        # Route every attribute assignment through unisetattr so controlled
        # attributes, context handlers and registered callbacks stay in
        # sync; QDialog is passed as the class whose __setattr__ performs
        # the actual store.
        return unisetattr(self, name, value, QDialog)
defaultKeyActions = {}
if sys.platform == "darwin":
defaultKeyActions = {
(Qt.ControlModifier, Qt.Key_M): lambda self: self.showMaximized if self.isMinimized() else self.showMinimized(),
(Qt.ControlModifier, Qt.Key_W): lambda self: self.setVisible(not self.isVisible())}
def scheduleSignalProcessing(self):
"""
Schedule signal processing by the signal manager.
..note:: The processing is already scheduled at the most appropriate
time so you should have few uses for this method.
"""
_deprecation_warning("scheduleSignalProcessing")
if self.signalManager is not None:
self.signalManager.scheduleSignalProcessing(self)
def setBlocking(self, state=True):
""" Set blocking flag for this widget. While this flag is set this
widget and all its descendants will not receive any new signals from
the signal manager
"""
self.asyncBlock = state
self.emit(SIGNAL("blockingStateChanged(bool)"), self.asyncBlock)
if not self.isBlocking():
self.scheduleSignalProcessing()
def isBlocking(self):
""" Is this widget blocking signal processing. Widget is blocking if
asyncBlock value is True or any AsyncCall objects in asyncCalls list
has blocking flag set
"""
return self.asyncBlock or any(a.blocking for a in self.asyncCalls)
def asyncExceptionHandler(self, (etype, value, tb)):
sys.excepthook(etype, value, tb)
def asyncFinished(self, async, string):
""" Remove async from asyncCalls, update blocking state
"""
index = self.asyncCalls.index(async)
async = self.asyncCalls.pop(index)
if async.blocking and not self.isBlocking():
# if we are responsible for unblocking
self.emit(SIGNAL("blockingStateChanged(bool)"), False)
self.scheduleSignalProcessing()
async.disconnect(async, SIGNAL("finished(PyQt_PyObject, QString)"), self.asyncFinished)
self.emit(SIGNAL("asyncCallsStateChange()"))
def asyncCall(self, func, args=(), kwargs={}, name=None, onResult=None, onStarted=None, onFinished=None, onError=None, blocking=True, thread=None, threadPool=None):
""" Return an OWConcurent.AsyncCall object func, args and kwargs
set and signals connected.
"""
_deprecation_warning("asyncCall")
from functools import partial
from OWConcurrent import AsyncCall
asList = lambda slot: slot if isinstance(slot, list) else ([slot] if slot else [])
onResult = asList(onResult)
onStarted = asList(onStarted) #+ [partial(self.setBlocking, True)]
onFinished = asList(onFinished) #+ [partial(self.blockSignals, False)]
onError = asList(onError) or [self.asyncExceptionHandler]
async = AsyncCall(func, args, kwargs, thread=thread, threadPool=threadPool)
async.name = name if name is not None else ""
for slot in onResult:
async.connect(async, SIGNAL("resultReady(PyQt_PyObject)"), slot, Qt.QueuedConnection)
for slot in onStarted:
async.connect(async, SIGNAL("starting()"), slot, Qt.QueuedConnection)
for slot in onFinished:
async.connect(async, SIGNAL("finished(QString)"), slot, Qt.QueuedConnection)
for slot in onError:
async.connect(async, SIGNAL("unhandledException(PyQt_PyObject)"), slot, Qt.QueuedConnection)
self.addAsyncCall(async, blocking)
return async
def addAsyncCall(self, async, blocking=True):
""" Add AsyncCall object to asyncCalls list (will be removed
once it finishes processing).
"""
_deprecation_warning("addAsyncCall")
async.connect(async, SIGNAL("finished(PyQt_PyObject, QString)"), self.asyncFinished)
async.blocking = blocking
if blocking:
# if we are responsible for blocking
state = any(a.blocking for a in self.asyncCalls)
self.asyncCalls.append(async)
if not state:
self.emit(SIGNAL("blockingStateChanged(bool)"), True)
else:
self.asyncCalls.append(async)
self.emit(SIGNAL("asyncCallsStateChange()"))
def blocking(method):
""" Return method that sets blocking flag while executing
"""
from functools import wraps
@wraps(method)
def wrapper(self, *args, **kwargs):
old = self._blocking
self.setBlocking(True)
try:
return method(self, *args, **kwargs)
finally:
self.setBlocking(old)
if __name__ == "__main__":
a = QApplication(sys.argv)
oww = OWBaseWidget()
oww.show()
a.exec_()
oww.saveSettings()
| gpl-3.0 |
maiklos-mirrors/jfx78 | modules/web/src/main/native/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py | 124 | 2214 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.net.statusserver import StatusServer
from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase
from webkitpy.common.net.web_mock import MockBrowser
class StatusServerTest(OutputCaptureTestCaseBase):
    def test_url_for_issue(self):
        """Posting a status should forward queue name, status and bot id to the browser."""
        browser = MockBrowser()
        server = StatusServer(browser=browser, bot_id='123')
        server.update_status('queue name', 'the status')
        for key, expected in (('queue_name', 'queue name'),
                              ('status', 'the status'),
                              ('bot_id', '123')):
            self.assertEqual(expected, browser.params[key])
| gpl-2.0 |
TheAlgorithms/Python | project_euler/problem_050/sol1.py | 1 | 1931 | """
Project Euler Problem 50: https://projecteuler.net/problem=50
Consecutive prime sum
The prime 41, can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below
one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a prime,
contains 21 terms, and is equal to 953.
Which prime, below one-million, can be written as the sum of the most
consecutive primes?
"""
from typing import List
def prime_sieve(limit: int) -> List[int]:
    """
    Sieve of Eratosthenes

    Function to return all the prime numbers up to a number 'limit'

    https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes

    >>> prime_sieve(3)
    [2]

    >>> prime_sieve(50)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]

    >>> prime_sieve(2)
    []
    """
    # Guard small limits: the previous implementation raised IndexError for
    # limit < 3 when indexing is_prime[1] / is_prime[2]. There are no primes
    # strictly below 3 other than 2, and none at all below limit <= 2.
    if limit < 3:
        return []
    # is_prime[i] is True while i is still a prime candidate.
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    # Sieve with odd factors only; even composites never get collected below
    # because the collection loop steps over odd numbers and 2 is pre-seeded.
    for i in range(3, int(limit ** 0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """
    Returns the biggest prime, below the ceiling, that can be written as the
    sum of the most consecutive primes.

    >>> solution(500)
    499

    >>> solution(1_000)
    953

    >>> solution(10_000)
    9521
    """
    primes = prime_sieve(ceiling)
    # O(1) membership tests instead of O(n) scans of the primes list.
    prime_set = set(primes)
    # prefix_sums[k] is the sum of the first k primes, so the sum of any
    # window primes[i:j] is a single subtraction instead of re-summing the
    # slice on every inner iteration (which made the original quadratic-plus).
    prefix_sums = [0]
    for prime in primes:
        prefix_sums.append(prefix_sums[-1] + prime)
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Only windows strictly longer than the best found so far can
        # improve the answer, so start j past the current record length.
        for j in range(i + length, len(primes)):
            sol = prefix_sums[j] - prefix_sums[i]
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    # f-string '=' debug form (Python 3.8+) prints "solution() = <answer>".
    print(f"{solution() = }")
| mit |
tiancj/emesene | emesene/e3/papylib/papyon/papyon/util/async.py | 6 | 1364 | # -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2010 Collabora Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import logging
logger = logging.getLogger("papyon.util.async")
__all__ = ["run"]
def is_valid_callback(callback):
    """Return True if *callback* is a non-empty tuple whose first item is a callable."""
    if not isinstance(callback, tuple) or not callback:
        return False
    func = callback[0]
    return func is not None and callable(func)
def run(callback, *args):
    """Invoke *callback*, a ``(callable, extra_args...)`` tuple, passing *args*
    followed by the tuple's trailing arguments.

    A ``None`` callback is silently skipped; any other invalid callback is
    logged (with a stack trace printed) and ignored.
    """
    if callback is None:
        return
    if not is_valid_callback(callback):
        import traceback
        traceback.print_stack()
        logger.error("Invalid callback %s" % repr(callback))
        return
    func = callback[0]
    call_args = tuple(args) + tuple(callback[1:])
    func(*call_args)
| gpl-3.0 |
unseenlaser/python-for-android | python3-alpha/extra_modules/gdata/acl/data.py | 98 | 1893 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Access Control List (ACL) Extension"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.data
import gdata.opensearch.data
GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
class AclRole(atom.core.XmlElement):
    """Describes the role of an entry in an access control list."""
    _qname = GACL_TEMPLATE % 'role'
    # String class attributes name XML attributes; presumably interpreted by
    # atom.core.XmlElement's declarative mapping — confirm in atom.core.
    value = 'value'
class AclAdditionalRole(atom.core.XmlElement):
    """Describes an additionalRole element."""
    _qname = GACL_TEMPLATE % 'additionalRole'
    # Maps the XML 'value' attribute (per atom.core's declarative mapping).
    value = 'value'
class AclScope(atom.core.XmlElement):
    """Describes the scope of an entry in an access control list."""
    _qname = GACL_TEMPLATE % 'scope'
    # Scope carries both a type (e.g. user/group/domain) and a value.
    type = 'type'
    value = 'value'
class AclWithKey(atom.core.XmlElement):
    """Describes a key that can be used to access a document."""
    _qname = GACL_TEMPLATE % 'withKey'
    key = 'key'
    # Child elements are declared by assigning their element classes.
    role = AclRole
    additional_role = AclAdditionalRole
class AclEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of an access control list (ACL)."""
    # Each ACL entry pairs a scope (who) with a role (what they may do),
    # optionally via a shared key.
    scope = AclScope
    role = AclRole
    with_key = AclWithKey
    additional_role = AclAdditionalRole
class AclFeed(gdata.data.GDFeed):
    """Describes a feed of an access control list (ACL)."""
    # A list type declares a repeated child element.
    entry = [AclEntry]
| apache-2.0 |
GeoNode/geonode | geonode/tests/csw.py | 2 | 17095 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from .base import GeoNodeBaseTestSupport
import os
import glob
import gisdata
import logging
from lxml import etree
from owslib import fes
from urllib.parse import urljoin
from owslib.etree import etree as dlxml
from django.conf import settings
from geonode import geoserver
from geonode.utils import check_ogc_backend
from geonode.catalogue import get_catalogue
from geonode.base.models import ResourceBase
logger = logging.getLogger(__name__)
class GeoNodeCSWTest(GeoNodeBaseTestSupport):
    """Tests geonode.catalogue app/module"""
    # NOTE(review): these are integration tests — they talk to a live CSW
    # endpoint (pycsw) and, for the upload tests, require the 'pycsw_http'
    # backend plus the gisdata sample-metadata package on disk.
    def test_csw_base(self):
        """Verify that GeoNode works against any CSW"""
        csw = get_catalogue(
            backend={
                'ENGINE': 'geonode.catalogue.backends.pycsw_local',
                'URL': urljoin('http://localhost:8001/', '/catalogue/csw'),
            },
            skip_caps=False)
        self.assertEqual(
            csw.catalogue.url,
            urljoin('http://localhost:8001/', '/catalogue/csw')
        )
        # test that OGC:CSW URLs are identical to what is defined in GeoNode
        for op in csw.catalogue.operations:
            for method in op.methods:
                self.assertEqual(
                    csw.catalogue.url,
                    method['url'],
                    'Expected GeoNode URL to be equal to all CSW URLs')
        # test that OGC:CSW 2.0.2 is supported
        self.assertEqual(csw.catalogue.version, '2.0.2',
                         'Expected "2.0.2" as a supported version')
        # test that transactions are supported
        # (pycsw_local is read-only, so Transaction is not advertised there)
        if csw.catalogue.type != 'pycsw_local':
            self.assertTrue(
                'Transaction' in [
                    o.name for o in csw.catalogue.operations],
                'Expected Transaction to be a supported operation')
        # test that gmd:MD_Metadata is a supported typename
        for o in csw.catalogue.operations:
            if o.name == 'GetRecords':
                typenames = o.parameters['typeNames']['values']
                self.assertTrue(
                    'gmd:MD_Metadata' in typenames,
                    'Expected "gmd:MD_Metadata" to be a supported typeNames value')
        # test that http://www.isotc211.org/2005/gmd is a supported output
        # schema
        for o in csw.catalogue.operations:
            if o.name == 'GetRecords':
                outputschemas = o.parameters['outputSchema']['values']
                self.assertTrue(
                    'http://www.isotc211.org/2005/gmd' in outputschemas,
                    'Expected "http://www.isotc211.org/2005/gmd" to be a supported outputSchema value')
    def test_csw_search_count(self):
        """Verify that GeoNode CSW can handle search counting"""
        csw = get_catalogue(
            backend={
                'ENGINE': 'geonode.catalogue.backends.pycsw_local',
                'URL': urljoin('http://localhost:8001/', '/catalogue/csw'),
            },
            skip_caps=False)
        self.assertEqual(
            csw.catalogue.url,
            urljoin('http://localhost:8001/', '/catalogue/csw')
        )
        # get all records
        csw.catalogue.getrecords(typenames='csw:Record')
        self.assertEqual(
            csw.catalogue.results['matches'],
            16,
            'Expected 16 records')
        # get all ISO records, test for numberOfRecordsMatched
        csw.catalogue.getrecords(typenames='gmd:MD_Metadata')
        self.assertEqual(
            csw.catalogue.results['matches'],
            16,
            'Expected 16 records against ISO typename')
        # Make sure it currently counts both published and unpublished ones too
        try:
            ResourceBase.objects.filter(is_published=True).update(is_published=False)
            # get all ISO records, test for numberOfRecordsMatched
            csw.catalogue.getrecords(typenames='gmd:MD_Metadata')
            self.assertEqual(
                csw.catalogue.results['matches'],
                16,
                'Expected 16 records against ISO typename')
        finally:
            # Always restore published state so later tests see clean data.
            ResourceBase.objects.filter(is_published=False).update(is_published=True)
    def test_csw_outputschema_dc(self):
        """Verify that GeoNode CSW can handle ISO metadata with Dublin Core outputSchema"""
        csw = get_catalogue()
        # search for 'san_andres_y_providencia_location', output as Dublin Core
        csw.catalogue.getrecords(
            typenames='gmd:MD_Metadata',
            keywords=['%san_andres_y_providencia_location%'],
            outputschema='http://www.opengis.net/cat/csw/2.0.2',
            esn='full')
        record = list(csw.catalogue.records.values())[0]
        # test that the ISO title maps correctly in Dublin Core
        self.assertEqual(record.title, "San Andres Y Providencia Location")
        # test that the ISO abstract maps correctly in Dublin Core
        self.assertEqual(record.abstract, 'No abstract provided')
        # test for correct service link articulation
        for link in record.references:
            if check_ogc_backend(geoserver.BACKEND_PACKAGE):
                if link['scheme'] == 'OGC:WMS':
                    self.assertEqual(link['url'], f"{settings.GEOSERVER_PUBLIC_LOCATION}ows")
                elif link['scheme'] == 'OGC:WFS':
                    self.assertEqual(link['url'], f"{settings.GEOSERVER_PUBLIC_LOCATION}ows")
                elif link['scheme'] == 'OGC:WCS':
                    self.assertEqual(link['url'], f"{settings.GEOSERVER_PUBLIC_LOCATION}ows")
    def test_csw_outputschema_iso(self):
        """Verify that GeoNode CSW can handle ISO metadata with ISO outputSchema"""
        csw = get_catalogue()
        # search for 'san_andres_y_providencia_location', output as ISO 19139
        csw.catalogue.getrecords(
            typenames='gmd:MD_Metadata',
            keywords=['%san_andres_y_providencia_location%'],
            outputschema='http://www.isotc211.org/2005/gmd',
            esn='full')
        record = list(csw.catalogue.records.values())[0]
        # test that the ISO title maps correctly in Dublin Core
        self.assertEqual(record.identification.title, "San Andres Y Providencia Location")
        # test that the ISO abstract maps correctly in Dublin Core
        self.assertEqual(record.identification.abstract, 'No abstract provided')
        # test BBOX properties in Dublin Core
        from decimal import Decimal
        self.assertAlmostEqual(Decimal(record.identification.bbox.minx), Decimal('-81.8593555'), places=3)
        self.assertAlmostEqual(Decimal(record.identification.bbox.miny), Decimal('12.1665322'), places=3)
        self.assertAlmostEqual(Decimal(record.identification.bbox.maxx), Decimal('-81.356409'), places=3)
        self.assertAlmostEqual(Decimal(record.identification.bbox.maxy), Decimal('13.396306'), places=3)
        # test for correct link articulation
        for link in record.distribution.online:
            if check_ogc_backend(geoserver.BACKEND_PACKAGE):
                if link.protocol == 'OGC:WMS':
                    self.assertEqual(
                        link.url,
                        f'{settings.GEOSERVER_PUBLIC_LOCATION}ows',
                        'Expected a specific OGC:WMS URL')
                elif link.protocol == 'OGC:WFS':
                    self.assertEqual(
                        link.url,
                        f'{settings.GEOSERVER_PUBLIC_LOCATION}wfs',
                        'Expected a specific OGC:WFS URL')
    def test_csw_outputschema_dc_bbox(self):
        """Verify that GeoNode CSW can handle ISO metadata BBOX model with Dublin Core outputSchema"""
        csw = get_catalogue()
        # search for 'san_andres_y_providencia_location', output as Dublin
        # Core
        csw.catalogue.getrecords(
            typenames='gmd:MD_Metadata',
            keywords=['san_andres_y_providencia_location'],
            outputschema='http://www.opengis.net/cat/csw/2.0.2',
            esn='full')
        record = list(csw.catalogue.records.values())[0]
        # test CRS constructs in Dublin Core
        self.assertEqual(record.bbox.crs.code, 4326)
        # test BBOX properties in Dublin Core
        from decimal import Decimal
        logger.debug([Decimal(record.bbox.minx), Decimal(record.bbox.miny),
                      Decimal(record.bbox.maxx), Decimal(record.bbox.maxy)])
        self.assertAlmostEqual(Decimal(record.bbox.minx), Decimal('-81.859356'), places=3)
        self.assertAlmostEqual(Decimal(record.bbox.miny), Decimal('12.166532'), places=3)
        self.assertAlmostEqual(Decimal(record.bbox.maxx), Decimal('-81.356409'), places=3)
        self.assertAlmostEqual(Decimal(record.bbox.maxy), Decimal('13.396306'), places=3)
    def test_csw_outputschema_fgdc(self):
        """Verify that GeoNode CSW can handle ISO metadata with FGDC outputSchema"""
        csw = get_catalogue()
        if csw.catalogue.type in {'pycsw_http', 'pycsw_local'}:
            # get all ISO records in FGDC schema
            csw.catalogue.getrecords(
                typenames='gmd:MD_Metadata',
                keywords=['san_andres_y_providencia_location'],
                outputschema='http://www.opengis.net/cat/csw/csdgm')
            record = list(csw.catalogue.records.values())[0]
            # test that the ISO title maps correctly in FGDC
            self.assertEqual(record.idinfo.citation.citeinfo['title'], "San Andres Y Providencia Location")
            # test that the ISO abstract maps correctly in FGDC
            self.assertEqual(record.idinfo.descript.abstract, 'No abstract provided')
    def test_csw_query_bbox(self):
        """Verify that GeoNode CSW can handle bbox queries"""
        csw = get_catalogue()
        bbox = fes.BBox([-140, -70, 80, 70])
        try:
            csw.catalogue.getrecords2([bbox, ])
            logger.debug(csw.catalogue.results)
            self.assertEqual(csw.catalogue.results, {'matches': 7, 'nextrecord': 0, 'returned': 7})
        except Exception:
            # This test seems to be broken on pycsw, so failures are
            # deliberately swallowed here — TODO: investigate and re-enable.
            pass
    def test_csw_upload_fgdc(self):
        """Verify that GeoNode CSW can handle FGDC metadata upload"""
        csw = get_catalogue()
        if csw.catalogue.type == 'pycsw_http':
            # upload a native FGDC metadata document
            md_doc = etree.tostring(
                dlxml.fromstring(
                    open(
                        os.path.join(
                            gisdata.GOOD_METADATA,
                            'sangis.org',
                            'Census',
                            'Census_Blockgroup_Pop_Housing.shp.xml')).read()))
            csw.catalogue.transaction(
                ttype='insert',
                typename='fgdc:metadata',
                record=md_doc)
            # test that FGDC document was successfully inserted
            self.assertEqual(csw.catalogue.results['inserted'], 1)
            # query against FGDC typename, output FGDC
            csw.catalogue.getrecords(typenames='fgdc:metadata')
            self.assertEqual(csw.catalogue.results['matches'], 1)
            record = list(csw.catalogue.records.values())[0]
            # test that the FGDC title maps correctly in DC
            self.assertEqual(record.title, "Census_Blockgroup_Pop_Housing")
            # test that the FGDC type maps correctly in DC
            self.assertEqual(record.type, "vector digital data")
            # test CRS constructs in Dublin Core
            self.assertEqual(record.bbox.crs.code, 4326)
            # test BBOX properties in Dublin Core
            from decimal import Decimal
            self.assertEqual(Decimal(record.bbox.minx), Decimal('-117.6'))
            self.assertEqual(Decimal(record.bbox.miny), Decimal('32.53'))
            self.assertEqual(Decimal(record.bbox.maxx), Decimal('-116.08'))
            self.assertEqual(Decimal(record.bbox.maxy), Decimal('33.51'))
            # query against FGDC typename, return in ISO
            csw.catalogue.getrecords(
                typenames='fgdc:metadata',
                esn='brief',
                outputschema='http://www.isotc211.org/2005/gmd')
            self.assertEqual(csw.catalogue.results['matches'], 1)
            record = list(csw.catalogue.records.values())[0]
            # test that the FGDC title maps correctly in ISO
            self.assertEqual(record.identification.title, "Census_Blockgroup_Pop_Housing")
            # cleanup and delete inserted FGDC metadata document
            csw.catalogue.transaction(
                ttype='delete',
                typename='fgdc:metadata',
                cql='fgdc:Title like "Census_Blockgroup_Pop_Housing"')
            self.assertEqual(csw.catalogue.results['deleted'], 1)
    def test_csw_bulk_upload(self):
        """Verify that GeoNode CSW can handle bulk upload of ISO and FGDC metadata"""
        csw = get_catalogue()
        if csw.catalogue.type == 'pycsw_http':
            identifiers = []
            # upload all metadata
            for root, dirs, files in os.walk(os.path.join(gisdata.GOOD_METADATA, 'sangis.org')):
                for mfile in files:
                    if mfile.endswith('.xml'):
                        md_doc = etree.tostring(
                            dlxml.fromstring(
                                open(
                                    os.path.join(
                                        root,
                                        mfile)).read()))
                        csw.catalogue.transaction(
                            ttype='insert',
                            typename='fgdc:metadata',
                            record=md_doc)
                        identifiers.append(
                            csw.catalogue.results['insertresults'][0])
            for md in glob.glob(os.path.join(gisdata.GOOD_METADATA, 'wustl.edu', '*.xml')):
                md_doc = etree.tostring(dlxml.fromstring(open(md).read()))
                csw.catalogue.transaction(
                    ttype='insert',
                    typename='gmd:MD_Metadata',
                    record=md_doc)
                identifiers.append(csw.catalogue.results['insertresults'][0])
            # query against FGDC typename
            # NOTE(review): the asserted counts below (72, 115, 187) do not
            # match the counts quoted in the failure messages (187, 194, 381);
            # the messages look stale — confirm which numbers are current.
            csw.catalogue.getrecords(typenames='fgdc:metadata')
            self.assertEqual(
                csw.catalogue.results['matches'],
                72,
                'Expected 187 records in FGDC model')
            # query against ISO typename
            csw.catalogue.getrecords(typenames='gmd:MD_Metadata')
            self.assertEqual(
                csw.catalogue.results['matches'],
                115,
                'Expected 194 records in ISO model')
            # query against FGDC and ISO typename
            csw.catalogue.getrecords(typenames='gmd:MD_Metadata fgdc:metadata')
            self.assertEqual(
                csw.catalogue.results['matches'],
                187,
                'Expected 381 records total in FGDC and ISO model')
            # clean up
            for i in identifiers:
                csw.catalogue.transaction(ttype='delete', identifier=i)
# def test_layer_delete_from_catalogue(self):
# """Verify that layer is correctly deleted from Catalogue
# """
#
# Test Uploading then Deleting a Shapefile from Catalogue
# shp_file = os.path.join(gisdata.VECTOR_DATA, 'san_andres_y_providencia_poi.shp')
# shp_layer = file_upload(shp_file)
# catalogue = get_catalogue()
# catalogue.remove_record(shp_layer.uuid)
# shp_layer_info = catalogue.get_record(shp_layer.uuid)
# self.assertEqual(shp_layer_info, None, 'Expected no layer info for Shapefile')
#
# Clean up and completely delete the layer
# shp_layer.delete()
#
# Test Uploading then Deleting a TIFF file from GeoNetwork
# tif_file = os.path.join(gisdata.RASTER_DATA, 'test_grid.tif')
# tif_layer = file_upload(tif_file)
# catalogue.remove_record(tif_layer.uuid)
# tif_layer_info = catalogue.get_record(tif_layer.uuid)
# self.assertEqual(tif_layer_info, None, 'Expected no layer info for TIFF file')
#
# Clean up and completely delete the layer
# tif_layer.delete()
| gpl-3.0 |
mborgbrant/hallifornia-font | node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """Visual Studio tool.

  Thin wrapper around a tool's attribute dictionary; the tool name is stored
  as the 'Name' attribute alongside the caller-supplied attributes.
  """

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    attributes = attrs if attrs else {}
    attributes['Name'] = name
    self._attrs = attributes

  def _GetSpecification(self):
    """Creates an element for the tool.

    Returns:
      A new xml.dom.Element for the tool.
    """
    # Elements are represented as [tag, attribute_dict] lists for easy_xml.
    return ['Tool', self._attrs]
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Copy the sequence so later caller-side mutation is not reflected here.
    self.contents = [] if not contents else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project writer."""
  # The project is accumulated as nested [tag, attrs, children...] lists,
  # the element representation consumed by easy_xml.WriteXmlIfChanged.
  def __init__(self, project_path, version, name, guid=None, platforms=None):
    """Initializes the project.
    Args:
      project_path: Path to the project file.
      version: Format version to emit.
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If null, ['Win32']
    """
    self.project_path = project_path
    self.version = version
    self.name = name
    self.guid = guid
    # Default to Win32 for platforms.
    if not platforms:
      platforms = ['Win32']
    # Initialize the specifications of the various sections.
    self.platform_section = ['Platforms']
    for platform in platforms:
      self.platform_section.append(['Platform', {'Name': platform}])
    self.tool_files_section = ['ToolFiles']
    self.configurations_section = ['Configurations']
    self.files_section = ['Files']
    # Keep a dict keyed on filename to speed up access.
    self.files_dict = dict()
  def AddToolFile(self, path):
    """Adds a tool file to the project.
    Args:
      path: Relative path from project to tool file.
    """
    self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
  def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
    """Returns the specification for a configuration.
    Args:
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Returns:
      A [tag, attrs, tool_elements...] list for the configuration node.
    """
    # Handle defaults
    if not attrs:
      attrs = {}
    if not tools:
      tools = []
    # Add configuration node and its attributes
    node_attrs = attrs.copy()
    node_attrs['Name'] = config_name
    specification = [config_type, node_attrs]
    # Add tool nodes and their attributes
    if tools:
      for t in tools:
        if isinstance(t, Tool):
          specification.append(t._GetSpecification())
        else:
          # Bare strings are promoted to attribute-less Tool elements.
          specification.append(Tool(t)._GetSpecification())
    return specification
  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.
    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
    self.configurations_section.append(spec)
  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.
    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.
    Will call itself recursively, if the files list contains Filter objects.
    """
    for f in files:
      if isinstance(f, Filter):
        node = ['Filter', {'Name': f.name}]
        self._AddFilesToNode(node, f.contents)
      else:
        node = ['File', {'RelativePath': f}]
        # Remember each File node so AddFileConfig can find it by path later.
        self.files_dict[f] = node
      parent.append(node)
  def AddFiles(self, files):
    """Adds files to the project.
    Args:
      files: A list of Filter objects and/or relative paths to files.
    This makes a copy of the file/filter tree at the time of this call. If you
    later add files to a Filter object which was passed into a previous call
    to AddFiles(), it will not be reflected in this project.
    """
    self._AddFilesToNode(self.files_section, files)
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter. That is, it doesn't merge the trees.
  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.
    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path
    parent = self.files_dict.get(path)
    if not parent:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)
    # Add the config to the file node
    spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
                                         tools)
    parent.append(spec)
  def WriteIfChanged(self):
    """Writes the project file."""
    # First create XML content definition
    content = [
        'VisualStudioProject',
        {'ProjectType': 'Visual C++',
         'Version': self.version.ProjectVersion(),
         'Name': self.name,
         'ProjectGUID': self.guid,
         'RootNamespace': self.name,
         'Keyword': 'Win32Proj'
        },
        self.platform_section,
        self.tool_files_section,
        self.configurations_section,
        ['References'], # empty section
        self.files_section,
        ['Globals'] # empty section
    ]
    easy_xml.WriteXmlIfChanged(content, self.project_path,
                               encoding="Windows-1252")
| mit |
coder-james/mxnet | python/mxnet/optimizer.py | 4 | 30471 | """Weight updating functions."""
import math
import pickle
import logging
import warnings
import numpy
from .ndarray import NDArray, zeros, clip, sqrt, sign, array
from .ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update)
from .random import normal
class Optimizer(object):
    """The base class inherited by all optimizers.
    Parameters
    ----------
    rescale_grad : float, optional
        Multiply the gradient with `rescale_grad` before updating. Often
        choose to be ``1.0/batch_size``.
    param_idx2name : dict from int to string, optional
        A dictionary that maps int index to string name.
    clip_gradient : float, optional
        Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
    learning_rate : float, optional
        The initial learning rate.
    lr_scheduler : LRScheduler, optional
        The learning rate scheduler.
    wd : float, optional
        The weight decay (or L2 regularization) coefficient. Modifies objective
        by adding a penalty for having large weights.
    sym: Symbol, optional
        The Symbol this optimizer is applying to.
    begin_num_update : int, optional
        The initial number of updates.
    """
    def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
                 clip_gradient=None, learning_rate=0.01,
                 lr_scheduler=None, sym=None, begin_num_update=0):
        self.rescale_grad = rescale_grad
        self.lr = learning_rate
        self.lr_scheduler = lr_scheduler
        if lr_scheduler is not None:
            self.lr_scheduler.base_lr = learning_rate
        self.wd = wd
        self.lr_mult = {}
        self.wd_mult = {}
        self.begin_num_update = begin_num_update
        self.num_update = begin_num_update
        # Per-parameter update counters; used to keep num_update monotonic.
        self._index_update_count = {}
        self.clip_gradient = clip_gradient
        if param_idx2name is None:
            param_idx2name = {}
        assert isinstance(param_idx2name, dict), \
            'param_idx2name should be a dict of param indexes to names.'
        self.idx2name = param_idx2name.copy()
        self.sym = sym
        # Populate multipliers from the Symbol's attributes (if provided).
        self.set_lr_mult({})
        self.set_wd_mult({})
    # Global name -> class registry shared by all Optimizer subclasses.
    opt_registry = {}
    @staticmethod
    def register(klass):
        """Registers a new optimizer.
        Once an optimizer is registered, we can create an instance of this
        optimizer with `create_optimizer` later.
        Examples
        --------
        >>> @mx.optimizer.Optimizer.register
        ... class MyOptimizer(mx.optimizer.Optimizer):
        ...     pass
        >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
        >>> print(type(optim))
        <class '__main__.MyOptimizer'>
        """
        assert(isinstance(klass, type))
        name = klass.__name__.lower()
        if name in Optimizer.opt_registry:
            logging.warning('WARNING: New optimizer %s.%s is overriding '
                            'existing optimizer %s.%s',
                            klass.__module__, klass.__name__,
                            Optimizer.opt_registry[name].__module__,
                            Optimizer.opt_registry[name].__name__)
        Optimizer.opt_registry[name] = klass
        return klass
    @staticmethod
    def create_optimizer(name, **kwargs):
        """Instantiates an optimizer with a given name and kwargs.
        .. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
        Parameters
        ----------
        name: str
            Name of the optimizer. Should be the name
            of a subclass of Optimizer. Case insensitive.
        kwargs: dict
            Parameters for the optimizer.
        Returns
        -------
        Optimizer
            An instantiated optimizer.
        Examples
        --------
        >>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
        >>> type(sgd)
        <class 'mxnet.optimizer.SGD'>
        >>> adam = mx.optimizer.create('adam', learning_rate=.1)
        >>> type(adam)
        <class 'mxnet.optimizer.Adam'>
        """
        if name.lower() in Optimizer.opt_registry:
            return Optimizer.opt_registry[name.lower()](**kwargs)
        else:
            raise ValueError('Cannot find optimizer %s' % name)
    def create_state(self, index, weight):
        """Creates auxiliary state for a given weight.
        Some optimizers require additional states, e.g. as momentum, in addition
        to gradients in order to update weights. This function creates state
        for a given weight which will be used in `update`. This function is
        called only once for each weight.
        Parameters
        ----------
        index : int
            An unique index to identify the weight.
        weight : NDArray
            The weight.
        Returns
        -------
        state : any obj
            The state associated with the weight.
        """
        # Base implementation is stateless: returns None implicitly.
    def update(self, index, weight, grad, state):
        """Updates the given parameter using the corresponding gradient and state.
        Parameters
        ----------
        index : int
            The unique index of the parameter into the individual learning
            rates and weight decays. Learning rates and weight decay
            may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
        weight : NDArray
            The parameter to be updated.
        grad : NDArray
            The gradient of the objective with respect to this parameter.
        state : any obj
            The state returned by `create_state()`.
        """
        raise NotImplementedError()
    def set_lr_scale(self, args_lrscale):  # pylint: disable=unused-argument
        """[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
        raise DeprecationWarning
    def set_lr_mult(self, args_lr_mult):
        """Sets an individual learning rate multiplier for each parameter.
        If you specify a learning rate multiplier for a parameter, then
        the learning rate for the parameter will be set as the product of
        the global learning rate `self.lr` and its multiplier.
        .. note:: The default learning rate multiplier of a `Variable`
            can be set with `lr_mult` argument in the constructor.
        Parameters
        ----------
        args_lr_mult : dict of str/int to float
            For each of its key-value entries, the learning rate multipler for the
            parameter specified in the key will be set as the given value.
            You can specify the parameter with either its name or its index.
            If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_lr_mult` should match
            the name of the parameter in `sym`. If you use the index, it should
            correspond to the index of the parameter used in the `update` method.
            Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend to use the name instead.
        """
        self.lr_mult = {}
        if self.sym is not None:
            # Symbol attributes seed the defaults; explicit args override them.
            attr = self.sym.attr_dict()
            for name in self.sym.list_arguments():
                if name in attr and '__lr_mult__' in attr[name]:
                    self.lr_mult[name] = float(attr[name]['__lr_mult__'])
        self.lr_mult.update(args_lr_mult)
    def set_wd_mult(self, args_wd_mult):
        """Sets an individual weight decay multiplier for each parameter.
        By default, if `param_idx2name` was provided in the
        constructor, the weight decay multipler is set as 0 for all
        parameters whose name don't end with ``_weight`` or
        ``_gamma``.
        .. note:: The default weight decay multiplier for a `Variable`
            can be set with its `wd_mult` argument in the constructor.
        Parameters
        ----------
        args_wd_mult : dict of string/int to float
            For each of its key-value entries, the weight decay multipler for the
            parameter specified in the key will be set as the given value.
            You can specify the parameter with either its name or its index.
            If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_lr_mult` should match
            the name of the parameter in `sym`. If you use the index, it should
            correspond to the index of the parameter used in the `update` method.
            Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend to use the name instead.
        """
        self.wd_mult = {}
        # Biases, batch-norm betas, etc. (non *_weight/*_gamma) get no decay.
        for n in self.idx2name.values():
            if not (n.endswith('_weight') or n.endswith('_gamma')):
                self.wd_mult[n] = 0.0
        if self.sym is not None:
            attr = self.sym.attr_dict()
            for name in self.sym.list_arguments():
                if name in attr and '__wd_mult__' in attr[name]:
                    self.wd_mult[name] = float(attr[name]['__wd_mult__'])
        self.wd_mult.update(args_wd_mult)
    def _update_count(self, index):
        """Updates num_update.
        Parameters
        ----------
        index : int
            The index to be updated.
        """
        if index not in self._index_update_count:
            self._index_update_count[index] = self.begin_num_update
        self._index_update_count[index] += 1
        # num_update tracks the largest per-parameter counter seen so far.
        self.num_update = max(self._index_update_count[index], self.num_update)
    def _get_lr(self, index):
        """Gets the learning rate given the index of the weight.
        Parameters
        ----------
        index : int
            The index corresponding to the weight.
        Returns
        -------
        lr : float
            Learning rate for this index.
        """
        if self.lr_scheduler is not None:
            lr = self.lr_scheduler(self.num_update)
        else:
            lr = self.lr
        # Index-keyed multipliers take precedence over name-keyed ones.
        if index in self.lr_mult:
            lr *= self.lr_mult[index]
        elif index in self.idx2name:
            lr *= self.lr_mult.get(self.idx2name[index], 1.0)
        return lr
    def _get_wd(self, index):
        """Gets weight decay for index.
        Returns 0 for non-weights if the name of weights are provided for `__init__`.
        Parameters
        ----------
        index : int
            The index for weight.
        Returns
        -------
        wd : float
            Weight decay for this index.
        """
        wd = self.wd
        if index in self.wd_mult:
            wd *= self.wd_mult[index]
        elif index in self.idx2name:
            wd *= self.wd_mult.get(self.idx2name[index], 1.0)
        return wd
# convenience wrapper for Optimizer.Register
register = Optimizer.register # pylint: disable=invalid-name
@register
class SGD(Optimizer):
    """Stochastic gradient descent with momentum and weight decay.

    Update rule::

        state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
        weight = weight - state

    See :class:`~mxnet.ndarray.sgd_update` and
    :class:`~mxnet.ndarray.sgd_mom_update` for the exact kernels.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    momentum : float, optional
        Momentum coefficient.
    multi_precision : bool, optional
        ``False`` (default) accumulates in the weights' own precision;
        ``True`` keeps a 32-bit master copy of the weights and applies
        gradients in 32-bit precision, which can improve convergence
        and accuracy when training with float16.
    """

    def __init__(self, momentum=0.0, multi_precision=False, **kwargs):
        super(SGD, self).__init__(**kwargs)
        self.momentum = momentum
        self.multi_precision = multi_precision

    def create_state(self, index, weight):
        """Create the momentum buffer and, if requested, a float32 master copy."""
        if self.multi_precision and weight.dtype == numpy.float16:
            # Shadow the weight in float32; momentum lives in float32 too.
            master = array(weight, ctx=weight.context, dtype=numpy.float32)
            mom = None
            if self.momentum != 0.0:
                mom = zeros(weight.shape, weight.context, dtype=numpy.float32)
            return (mom, master)
        if weight.dtype == numpy.float16 and not self.multi_precision:
            warnings.warn("Accumulating with float16 in optimizer can lead to "
                          "poor accuracy or slow convergence. "
                          "Consider using multi_precision=True option of the "
                          "SGD optimizer")
        if self.momentum == 0.0:
            return None
        return zeros(weight.shape, weight.context, dtype=weight.dtype)

    def update(self, index, weight, grad, state):
        """Apply one SGD step to `weight` given `grad` and its `state`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        kwargs = {'rescale_grad': self.rescale_grad}
        if self.momentum > 0:
            kwargs['momentum'] = self.momentum
        if self.clip_gradient:
            kwargs['clip_gradient'] = self.clip_gradient

        if isinstance(state, (list, tuple)):
            # Multi-precision path: state is (momentum, float32 master copy).
            mom, master = state
            if mom is not None:
                mp_sgd_mom_update(weight, grad, mom, master, out=weight,
                                  lr=lr, wd=wd, **kwargs)
            else:
                mp_sgd_update(weight, grad, master, out=weight,
                              lr=lr, wd=wd, **kwargs)
        elif state is not None:
            sgd_mom_update(weight, grad, state, out=weight,
                           lr=lr, wd=wd, **kwargs)
        else:
            sgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs)
@register
class DCASGD(Optimizer):
    """Delay-compensated asynchronous SGD.

    Implements the optimizer described in *Asynchronous Stochastic
    Gradient Descent with Delay Compensation for Distributed Deep
    Learning*, available at https://arxiv.org/abs/1609.08326.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    momentum : float, optional
        The momentum value.
    lamda : float, optional
        Scale of the delay-compensation (DC) term.
    """

    def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
        super(DCASGD, self).__init__(**kwargs)
        self.momentum = momentum
        self.weight_previous = {}
        self.lamda = lamda

    def create_state(self, index, weight):
        """Return (momentum buffer or None, snapshot of the previous weight)."""
        if self.momentum == 0.0:
            return (None,
                    weight.copy())  # previous weight
        else:
            return (zeros(weight.shape, weight.context, dtype=weight.dtype),  # momentum
                    weight.copy())  # previous weight

    def update(self, index, weight, grad, state):
        """Apply one delay-compensated step to `weight`."""
        assert (isinstance(weight, NDArray))
        assert (isinstance(grad, NDArray))
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)
        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        mom, previous_weight = state
        # BUG FIX: the original tested `if mom:`, which evaluates the truth
        # value of an NDArray -- ambiguous for multi-element arrays.  The
        # intended check is whether a momentum buffer exists at all.
        if mom is not None:
            mom[:] *= self.momentum
            mom[:] += -lr * (grad + wd * weight + self.lamda
                             * grad * grad * (weight - previous_weight))
        else:
            assert (self.momentum == 0.0)
            mom = -lr * (grad + wd * weight + self.lamda
                         * grad * grad * (weight - previous_weight))
        previous_weight[:] = weight
        weight[:] += mom
@register
class NAG(SGD):
    """Nesterov accelerated SGD.

    Each weight is updated as::

        state = momentum * state + grad + wd * weight
        weight = weight - (lr * (grad + momentum * state))

    Accepts the same arguments as :class:`.SGD`.
    """

    def __init__(self, **kwargs):
        super(NAG, self).__init__(**kwargs)

    def update(self, index, weight, grad, state):
        """Apply one Nesterov-momentum step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)

        if state is None:
            # No momentum buffer means momentum must be disabled.
            assert self.momentum == 0.0
            weight[:] += -lr * (grad + wd * weight)
        else:
            buf = state
            buf[:] *= self.momentum
            grad += wd * weight
            buf[:] += grad
            # Look-ahead correction: step along grad + momentum * buf.
            grad[:] += self.momentum * buf
            weight[:] += -lr * grad
@register
class SGLD(Optimizer):
    """Stochastic Gradient Langevin Dynamics sampler.

    Implements the method described in *Stochastic Gradient Riemannian
    Langevin Dynamics on the Probability Simplex*, available at
    https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
    """

    def __init__(self, **kwargs):
        super(SGLD, self).__init__(**kwargs)

    def create_state(self, index, weight):
        """SGLD keeps no per-weight state."""
        return None

    def update(self, index, weight, grad, state):
        """Take half a gradient step plus Gaussian exploration noise."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # Injected noise has standard deviation sqrt(lr), per the paper.
        noise = normal(0, math.sqrt(lr), weight.shape, weight.context)
        weight[:] += -lr / 2 * (grad + wd * weight) + noise
@register  # pylint: disable=invalid-name
class ccSGD(SGD):
    """Deprecated alias of :class:`SGD`, kept only for backward compatibility."""

    def __init__(self, *args, **kwargs):
        super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
    """The Adam optimizer.

    Implements *Adam: A Method for Stochastic Optimization*
    (http://arxiv.org/abs/1412.6980).  For the exact update kernel see
    :class:`ndarray.adam_update`.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    beta1 : float, optional
        Exponential decay rate for the first moment estimates.
    beta2 : float, optional
        Exponential decay rate for the second moment estimates.
    epsilon : float, optional
        Small constant guarding against division by zero.
    """

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 **kwargs):
        super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def create_state(self, index, weight):
        """Allocate the running mean and variance buffers."""
        mean = zeros(weight.shape, weight.context, dtype=weight.dtype)
        variance = zeros(weight.shape, weight.context, dtype=weight.dtype)
        return (mean, variance)

    def update(self, index, weight, grad, state):
        """Apply one Adam step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        # Fold the bias correction of both moment estimates into the step size.
        step = self._index_update_count[index]
        correction1 = 1. - self.beta1 ** step
        correction2 = 1. - self.beta2 ** step
        lr *= math.sqrt(correction2) / correction1

        kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
                  'rescale_grad': self.rescale_grad}
        if self.clip_gradient:
            kwargs['clip_gradient'] = self.clip_gradient
        mean, var = state
        adam_update(weight, grad, mean, var, out=weight,
                    lr=lr, wd=wd, **kwargs)
@register
class AdaGrad(Optimizer):
    """AdaGrad optimizer.

    Implements *Adaptive Subgradient Methods for Online Learning and
    Stochastic Optimization*
    (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf):
    accumulates squared gradients and scales each coordinate's step by
    the inverse square root of that history.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    eps : float, optional
        Small constant guarding against division by zero.
    """

    def __init__(self, eps=1e-7, **kwargs):
        super(AdaGrad, self).__init__(**kwargs)
        self.float_stable_eps = eps

    def create_state(self, index, weight):
        """Allocate the accumulated squared-gradient buffer."""
        return zeros(weight.shape, weight.context)  # history

    def update(self, index, weight, grad, state):
        """Apply one AdaGrad step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        history = state
        history[:] += grad * grad
        # Per-coordinate adaptive step plus weight decay.
        weight[:] += -lr * (grad / sqrt(history + self.float_stable_eps) + wd * weight)
@register
class RMSProp(Optimizer):
    """The RMSProp optimizer.

    Two variants are implemented.  With ``centered=False`` this follows
    Tieleman & Hinton, 2012
    (http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    via :class:`~mxnet.ndarray.rmsprop_update`.  With ``centered=True``
    it follows Alex Graves, 2013
    (http://arxiv.org/pdf/1308.0850v5.pdf, equations 38-45) via
    :class:`~mxnet.ndarray.rmspropalex_update`.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    gamma1 : float, optional
        Decay factor of the moving average over past squared gradients.
    gamma2 : float, optional
        "Momentum" factor; only used when `centered` is ``True``.
    epsilon : float, optional
        Small constant guarding against division by zero.
    centered : bool, optional
        Selects Graves's (``True``) or Tieleman & Hinton's (``False``) variant.
    clip_weights : float, optional
        Clips weights into ``[-clip_weights, clip_weights]``.
    """

    def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
                 epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
        super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
        self.gamma1 = gamma1
        self.gamma2 = gamma2
        self.centered = centered
        self.epsilon = epsilon
        self.clip_weights = clip_weights

    def create_state(self, index, weight):
        """Allocate (n,) for the plain variant or (n, g, delta) when centered."""
        shape, ctx = weight.shape, weight.context
        if self.centered:
            return (zeros(shape, ctx),   # n
                    zeros(shape, ctx),   # g
                    zeros(shape, ctx))   # delta
        return (zeros(shape, ctx),)      # n

    def update(self, index, weight, grad, state):
        """Apply one RMSProp step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        self._update_count(index)

        kwargs = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
                  'rescale_grad': self.rescale_grad}
        if self.centered:
            kwargs['gamma2'] = self.gamma2
        if self.clip_gradient:
            kwargs['clip_gradient'] = self.clip_gradient
        if self.clip_weights:
            kwargs['clip_weights'] = self.clip_weights

        if self.centered:
            n, g, delta = state
            rmspropalex_update(weight, grad, n, g, delta, out=weight,
                               lr=lr, wd=wd, **kwargs)
        else:
            (n,) = state
            rmsprop_update(weight, grad, n, out=weight, lr=lr, wd=wd, **kwargs)
@register
class AdaDelta(Optimizer):
    """The AdaDelta optimizer.

    Implements *ADADELTA: An adaptive learning rate method*
    (https://arxiv.org/abs/1212.5701): exponential moving averages of
    squared gradients and of squared updates replace a global learning
    rate.

    This optimizer accepts the following parameters in addition to
    those accepted by :class:`.Optimizer`.

    Parameters
    ----------
    rho : float
        Decay rate for both the squared-gradient and delta accumulators.
    epsilon : float
        Small constant guarding against division by zero.
    """

    def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
        super(AdaDelta, self).__init__(**kwargs)
        self.rho = rho
        self.epsilon = epsilon

    def create_state(self, index, weight):
        """Allocate accumulators for squared gradients and squared deltas."""
        return (zeros(weight.shape, weight.context),  # accumulated g
                zeros(weight.shape, weight.context))  # accumulated delta

    def update(self, index, weight, grad, state):
        """Apply one AdaDelta step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        wd = self._get_wd(index)
        self._update_count(index)

        # NOTE: rescaling mutates the caller's gradient in place, matching
        # the historical behaviour of this optimizer.
        grad *= self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)

        acc_g, acc_delta = state
        acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
        current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
        acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
        weight[:] -= current_delta + wd * weight
#pylint: disable=invalid-name
@register
class Ftrl(Optimizer):
    """The FTRL-Proximal optimizer.

    From *Ad Click Prediction: a View from the Trenches*
    (http://dl.acm.org/citation.cfm?id=2488200).

    Parameters
    ----------
    lamda1 : float, optional
        L1 regularization coefficient.
    learning_rate : float, optional
        The initial learning rate.
    beta : float, optional
        Per-coordinate learning rate correlation parameter,
        .. math::
            \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^t}}
    """

    def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
        super(Ftrl, self).__init__(**kwargs)
        self.lamda1 = lamda1
        self.beta = beta
        self.lr = learning_rate

    def create_state(self, index, weight):
        """Allocate the z-accumulator (dn) and squared-gradient sum (n)."""
        return (zeros(weight.shape, weight.context),  # dn
                zeros(weight.shape, weight.context))  # n

    def update(self, index, weight, grad, state):
        """Apply one FTRL-Proximal step to `weight`."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        wd = self._get_wd(index)
        lr = self._get_lr(index)

        # NOTE: rescaling mutates the caller's gradient in place, matching
        # the historical behaviour of this optimizer.
        grad *= self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)

        dn, n = state
        dn += grad - (sqrt(n + grad * grad) - sqrt(n)) * weight / lr
        n += grad * grad
        # Closed-form proximal step: coordinates with |dn| <= lamda1 collapse
        # to exactly zero, giving sparse weights.
        weight[:] = (sign(dn) * self.lamda1 - dn) / \
                    ((self.beta + sqrt(n)) / lr + wd) * (NDArray.abs(dn) > self.lamda1)
@register
class Test(Optimizer):
    """A trivial optimizer used only for testing the optimizer machinery."""

    def __init__(self, **kwargs):
        super(Test, self).__init__(**kwargs)

    def create_state(self, index, weight):
        """Allocate a buffer that mirrors the weight."""
        return zeros(weight.shape, weight.context)

    def update(self, index, weight, grad, state):
        """Apply w += rescale_grad * grad and snapshot the result into state."""
        weight[:] += grad * self.rescale_grad
        state[:] = weight
# backward compatibility wrapper for Optimizer.CreateOptimizer
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
"""Updater for kvstore."""
def __init__(self, optimizer):
self.optimizer = optimizer
self.states = {}
self.states_synced = {}
def __call__(self, index, grad, weight):
"""Updates weight given gradient and index."""
if index not in self.states:
self.states[index] = self.optimizer.create_state(index, weight)
self.states_synced[index] = True
elif not self.states_synced[index]:
self.states[index] = \
self.sync_state_context(self.states[index], weight.context)
self.states_synced[index] = True
self.optimizer.update(index, weight, grad, self.states[index])
def sync_state_context(self, state, context):
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
def set_states(self, states):
"""Sets updater states."""
self.states = pickle.loads(states)
self.states_synced = dict.fromkeys(self.states.keys(), False)
def get_states(self):
"""Gets updater states."""
return pickle.dumps(self.states)
def get_updater(optimizer):
    """Return the updater callable needed by kvstore.

    Parameters
    ----------
    optimizer : Optimizer
        The optimizer to wrap.

    Returns
    -------
    updater : Updater
        A callable with signature ``(index, grad, weight)``.
    """
    return Updater(optimizer)
| apache-2.0 |
samdoran/ansible | lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py | 70 | 4093 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Russell Teague <rteague2 () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author: "Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure VMware VSAN Cluster
hosts: deploy_node
gather_facts: False
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
with_items: "{{ groups['esxi'][1:] }}"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_vsan_cluster(host_system, new_cluster_uuid):
    """Enable VSAN on `host_system`, optionally joining an existing cluster.

    Returns a ``(changed, task_result, cluster_uuid)`` tuple, where
    `cluster_uuid` is read back from the host after the task completes.
    """
    vsan_system = host_system.configManager.vsanSystem

    vsan_config = vim.vsan.host.ConfigInfo()
    vsan_config.enabled = True
    if new_cluster_uuid is not None:
        # Join the given VSAN cluster instead of forming a new one.
        vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
        vsan_config.clusterInfo.uuid = new_cluster_uuid
    # Let the host claim its local disks automatically.
    vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
    vsan_config.storageInfo.autoClaimStorage = True

    changed, result = wait_for_task(vsan_system.UpdateVsan_Task(vsan_config))
    cluster_uuid = vsan_system.QueryHostStatus().uuid
    return changed, result, cluster_uuid
def main():
    """Ansible entry point: locate an ESXi host and enable VSAN on it."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    new_cluster_uuid = module.params['cluster_uuid']
    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # BUG FIX: dict views are not indexable on Python 3, so
        # host.keys()[0] raises TypeError; materialize the view first.
        host_system = list(host.keys())[0]
        changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
        module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        # Surface any unexpected failure through Ansible's JSON protocol.
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
patrioticcow/MessagesForSkype | packages/win32/bundle/MessagesForSkype/modules/python/1.3.1-beta/Lib/posixpath.py | 34 | 13777 | """Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import stat
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
    """Normalize case of pathname.  Has no effect under Posix."""
    # POSIX filesystems are case-sensitive, so the path is already canonical.
    return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
    """Test whether a path is absolute"""
    # On POSIX a path is absolute iff its first character is a slash.
    return s[:1] == '/'
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
    """Join two or more pathname components, inserting '/' as needed"""
    result = a
    for part in p:
        if part.startswith('/'):
            # An absolute component discards everything before it.
            result = part
        elif not result or result.endswith('/'):
            result += part
        else:
            result = result + '/' + part
    return result
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
    """Split a pathname.  Returns tuple "(head, tail)" where "tail" is
    everything after the final slash.  Either part may be empty."""
    cut = p.rfind('/') + 1
    head, tail = p[:cut], p[cut:]
    # Strip trailing slashes from head, except when head is all slashes
    # (e.g. the root '/' or POSIX's special '//').
    if head and head.strip('/'):
        head = head.rstrip('/')
    return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    """Split the extension from a pathname.

    Extension is everything from the last dot to the end, ignoring
    leading dots of the final path component (so '.cshrc' has no
    extension).  Returns "(root, ext)"; either part may be empty, and
    root + ext == p always holds.
    """
    sep_index = p.rfind('/')
    dot_index = p.rfind('.')
    if dot_index > sep_index:
        # BUG FIX: a dot that merely *starts* the final component marks a
        # hidden file, not an extension (CPython issue #1115886); skip over
        # all leading dots before accepting dot_index as the extension start.
        filename_start = sep_index + 1
        while filename_start < dot_index:
            if p[filename_start] != '.':
                return p[:dot_index], p[dot_index:]
            filename_start += 1
    return p, ''
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
    """Split a pathname into drive and path. On Posix, drive is always
    empty."""
    # Drive letters are a DOS/Windows concept; POSIX paths have none.
    return '', p
# Return the tail (basename) part of a path.
def basename(p):
    """Returns the final component of a pathname"""
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Returns the directory component of a pathname"""
    head, tail = split(p)
    return head
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic min and max differ first exactly where some pair in
    # the list differs, so only those two need to be compared.
    low = min(m)
    high = max(m)
    limit = min(len(low), len(high))
    for i in range(limit):
        if low[i] != high[i]:
            return low[:i]
    return low[:limit]
# Thin wrappers around os.stat() for common file metadata queries.
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename).st_size


def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename).st_mtime


def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename).st_atime


def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    return os.stat(filename).st_ctime
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        # Missing path, or a platform whose os module lacks lstat().
        return False
    return stat.S_ISLNK(mode)
# Does a path exist?
# This is false for dangling symbolic links.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links"""
    try:
        # Only reachability matters; the stat result itself is unused
        # (the original bound it to a dead local variable).
        os.stat(path)
    except os.error:
        return False
    return True
# Being true for dangling symbolic links is also useful.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        # lstat() does not follow symlinks, so a dangling link still counts.
        # The stat result itself is unused (the original bound it to a dead
        # local variable).
        os.lstat(path)
    except os.error:
        return False
    return True
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path.
def isdir(path):
    """Test whether a path is a directory"""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISDIR(mode)
# Is a path a regular file?
# This follows symbolic links, so both islink() and isfile() can be true
# for the same path.
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISREG(mode)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    # Two paths name the same file iff their (st_ino, st_dev) pairs match;
    # this is samestat() inlined on the two stat results.
    st1 = os.stat(f1)
    st2 = os.stat(f2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # Compare (st_ino, st_dev) of both descriptors; this is samestat()
    # inlined on the two fstat results.
    st1 = os.fstat(fp1)
    st2 = os.fstat(fp2)
    return st1.st_ino == st2.st_ino and st1.st_dev == st2.st_dev
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # The (inode, device) pair uniquely identifies a file on POSIX.
    if s1.st_ino != s2.st_ino:
        return False
    return s1.st_dev == s2.st_dev
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
    """Test whether a path is a mount point"""
    try:
        here = os.lstat(path)
        parent = os.lstat(join(path, '..'))
    except os.error:
        # Nonexistent paths cannot be mount points.
        return False
    if here.st_dev != parent.st_dev:
        # The parent lives on a different device: a filesystem boundary.
        return True
    if here.st_ino == parent.st_ino:
        # '..' resolves to the same i-node, as happens at the root of a mount.
        return True
    return False
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.

    Calls ``func(arg, dirname, names)`` for every directory in the tree
    rooted at `top` (including `top` itself, excluding '.' and '..').
    `func` may modify `names` in place to prune or reorder the
    subdirectories that will be visited.  `arg` is an opaque value
    passed through unchanged to every call; pass None when unused.
    """
    try:
        names = os.listdir(top)
    except os.error:
        # Unreadable directory: skip silently, matching historical behaviour.
        return
    func(arg, top, names)
    # Recurse only into entries still present after func() had its say.
    for name in names:
        child = join(top, name)
        try:
            st = os.lstat(child)
        except os.error:
            continue
        if stat.S_ISDIR(st.st_mode):
            walk(child, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructions.  If user or $HOME is unknown,
    do nothing."""
    if not path.startswith('~'):
        return path
    slash = path.find('/', 1)
    if slash < 0:
        slash = len(path)
    if slash == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' not in os.environ:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
        else:
            userhome = os.environ['HOME']
    else:
        # '~user': look the named user up in the password database.
        import pwd
        try:
            entry = pwd.getpwnam(path[1:slash])
        except KeyError:
            return path
        userhome = entry.pw_dir
    # Avoid a doubled slash when the home directory ends in '/'.
    return userhome.rstrip('/') + path[slash:]
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog
    if '$' not in path:
        return path
    if not _varprog:
        # Compile lazily (and once) so importing this module stays cheap.
        import re
        _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
    pos = 0
    while True:
        match = _varprog.search(path, pos)
        if not match:
            break
        pos, end = match.span(0)
        name = match.group(1)
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        if name in os.environ:
            # Splice in the value, then resume scanning after it so a value
            # containing '$' is not itself expanded.
            tail = path[end:]
            path = path[:pos] + os.environ[name]
            pos = len(path)
            path += tail
        else:
            # Unknown variable: leave the reference and move past it.
            pos = end
    return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    if path == '':
        return '.'
    # POSIX gives exactly two leading slashes a special meaning; one or
    # three-or-more collapse to a single slash.
    leading = 0
    if path.startswith('/'):
        leading = 1
        if path.startswith('//') and not path.startswith('///'):
            leading = 2
    parts = []
    for comp in path.split('/'):
        if comp in ('', '.'):
            continue
        if (comp != '..' or (not leading and not parts) or
                (parts and parts[-1] == '..')):
            parts.append(comp)
        elif parts:
            # '..' cancels the previous real component; at an absolute
            # root it is simply dropped.
            parts.pop()
    normalized = '/' * leading + '/'.join(parts)
    return normalized or '.'
def abspath(path):
    """Return an absolute path."""
    if isabs(path):
        return normpath(path)
    # Anchor relative paths at the current working directory.
    return normpath(join(os.getcwd(), path))
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
    symbolic links encountered in the path."""
    # Split into components; keep a root/empty marker so join() rebuilds
    # the prefix correctly for absolute and relative inputs alike.
    if isabs(filename):
        parts = ['/'] + filename.split('/')[1:]
    else:
        parts = [''] + filename.split('/')
    for count in range(2, len(parts) + 1):
        prefix = join(*parts[0:count])
        if not islink(prefix):
            continue
        resolved = _resolve_link(prefix)
        if resolved is None:
            # Symlink loop: give up on this component and return it
            # joined with the untouched remainder of the path.
            return abspath(join(*([prefix] + parts[count:])))
        # Restart resolution on the rewritten path, since the link
        # target may itself contain further symlinks.
        return realpath(join(*([resolved] + parts[count:])))
    return abspath(filename)
def _resolve_link(path):
    """Internal helper function.  Follows symlinks starting at *path*
    until reaching something that is not a symlink, or until a path is
    revisited (a symlink loop).  Returns None when a loop is detected.
    """
    visited = []
    while islink(path):
        if path in visited:
            # We came back to a path already followed: symlink loop.
            return None
        visited.append(path)
        target = os.readlink(path)
        if isabs(target):
            path = normpath(target)
        else:
            # Relative link targets are interpreted relative to the
            # directory containing the link itself.
            path = normpath(join(dirname(path), target))
    return path
supports_unicode_filenames = False
| mit |
devurandom/portage | pym/portage/tests/resolver/test_eapi.py | 13 | 6324 | # Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
class EAPITestCase(TestCase):
	"""Resolver tests: EAPI-gated dependency syntax must only be accepted
	in ebuilds whose declared EAPI actually supports the feature."""

	def testEAPI(self):
		"""Declare each feature (IUSE defaults, slot deps, use deps, strong
		blocks, use-dep defaults, REQUIRED_USE) at EAPI 0..4 and check that
		resolution fails below the EAPI that introduced the feature and
		succeeds at or above it."""
		ebuilds = {
			#EAPI-1: IUSE-defaults
			"dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" },
			"dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" },
			"dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" },
			"dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" },
			"dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" },
			#EAPI-1: slot deps
			"dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" },
			"dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" },
			"dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" },
			"dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" },
			"dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" },
			#EAPI-2: use deps
			"dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" },
			"dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" },
			"dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" },
			"dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" },
			"dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" },
			#EAPI-2: strong blocks
			"dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" },
			"dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" },
			"dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" },
			"dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" },
			"dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" },
			#EAPI-4: slot operator deps
			#~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" },
			#~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" },
			#~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" },
			#~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" },
			#~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" },
			#EAPI-4: use dep defaults
			"dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" },
			"dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" },
			"dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" },
			"dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" },
			"dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" },
			#EAPI-4: REQUIRED_USE
			"dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" },
			"dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
			"dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
			"dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
			"dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
			"dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"},
			}
		test_cases = (
			ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
			# not implemented: EAPI-4: slot operator deps
			#~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
			ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
			ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
		)
		playground = ResolverPlayground(ebuilds=ebuilds)
		try:
			for test_case in test_cases:
				playground.run_TestCase(test_case)
				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
		finally:
			# Always remove the playground's temporary tree.
			playground.cleanup()
| gpl-2.0 |
pearsonlab/nipype | nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py | 12 | 1494 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..preprocess import DARTELNorm2MNI
def test_DARTELNorm2MNI_inputs():
    """Check that every input trait of DARTELNorm2MNI carries the expected
    metadata (SPM field paths, mandatory/copyfile/usedefault flags).

    NOTE: this file is auto-generated by tools/checkspecs.py; the map below
    mirrors the interface spec at generation time.
    """
    input_map = dict(apply_to_files=dict(copyfile=False,
    field='mni_norm.data.subjs.images',
    mandatory=True,
    ),
    bounding_box=dict(field='mni_norm.bb',
    ),
    flowfield_files=dict(field='mni_norm.data.subjs.flowfields',
    mandatory=True,
    ),
    fwhm=dict(field='mni_norm.fwhm',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    matlab_cmd=dict(),
    mfile=dict(usedefault=True,
    ),
    modulate=dict(field='mni_norm.preserve',
    ),
    paths=dict(),
    template_file=dict(copyfile=False,
    field='mni_norm.template',
    mandatory=True,
    ),
    use_mcr=dict(),
    use_v8struct=dict(min_ver='8',
    usedefault=True,
    ),
    voxel_size=dict(field='mni_norm.vox',
    ),
    )
    inputs = DARTELNorm2MNI.input_spec()
    # Yield one assertion per (trait, metadata key) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DARTELNorm2MNI_outputs():
    """Check the (currently empty) metadata declared on DARTELNorm2MNI's
    output traits.  Auto-generated; see test_DARTELNorm2MNI_inputs."""
    output_map = dict(normalization_parameter_file=dict(),
    normalized_files=dict(),
    )
    outputs = DARTELNorm2MNI.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
bmbouter/kombu | kombu/transport/beanstalk.py | 8 | 4043 | """
kombu.transport.beanstalk
=========================
Beanstalk transport.
:copyright: (c) 2010 - 2013 by David Ziegler.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
from kombu.five import Empty
from kombu.utils.encoding import bytes_to_str
from kombu.utils.json import loads, dumps
from . import virtual
try:
import beanstalkc
except ImportError: # pragma: no cover
beanstalkc = None # noqa
DEFAULT_PORT = 11300
__author__ = 'David Ziegler <david.ziegler@gmail.com>'
class Channel(virtual.Channel):
    """Virtual channel backed by a beanstalkd server.

    Each kombu queue maps onto a beanstalk *tube* of the same name:
    messages are ``put`` into the tube and consumed via ``watch`` +
    ``reserve``.
    """

    # Lazily-established beanstalkc connection; see the ``client`` property.
    _client = None

    def _parse_job(self, job):
        """Decode a reserved job into ``(payload, tube_name)``.

        A successfully decoded job is deleted from the server; a job whose
        body fails to decode is buried instead so it is not redelivered.
        Raises ``Empty`` when *job* is None (i.e. reserve timed out).
        """
        item, dest = None, None
        if job:
            try:
                item = loads(bytes_to_str(job.body))
                dest = job.stats()['tube']
            except Exception:
                job.bury()
            else:
                job.delete()
        else:
            raise Empty()
        return item, dest

    def _put(self, queue, message, **kwargs):
        """Serialize *message* and put it onto the tube named *queue*."""
        extra = {}
        priority = self._get_message_priority(message)
        ttr = message['properties'].get('ttr')
        if ttr is not None:
            # ttr (time-to-run) bounds how long a consumer may hold a
            # reserved job before beanstalk re-queues it.
            extra['ttr'] = ttr
        self.client.use(queue)
        self.client.put(dumps(message), priority=priority, **extra)

    def _get(self, queue):
        """Reserve one message from *queue* (1 second timeout)."""
        if queue not in self.client.watching():
            self.client.watch(queue)
        # Ignore every other tube so reserve() only yields jobs from *queue*.
        [self.client.ignore(active) for active in self.client.watching()
         if active != queue]
        job = self.client.reserve(timeout=1)
        item, dest = self._parse_job(job)
        return item

    def _get_many(self, queues, timeout=1):
        """Reserve one message from any of the tubes in *queues*."""
        # timeout of None will cause beanstalk to timeout waiting
        # for a new request
        if timeout is None:
            timeout = 1
        watching = self.client.watching()
        # Watch every requested tube, then drop watched tubes that are
        # no longer requested.
        [self.client.watch(active) for active in queues
         if active not in watching]
        [self.client.ignore(active) for active in watching
         if active not in queues]
        job = self.client.reserve(timeout=timeout)
        return self._parse_job(job)

    def _purge(self, queue):
        """Drain *queue*, returning the number of messages deleted."""
        if queue not in self.client.watching():
            self.client.watch(queue)
        [self.client.ignore(active)
         for active in self.client.watching()
         if active != queue]
        count = 0
        while 1:
            job = self.client.reserve(timeout=1)
            if job:
                job.delete()
                count += 1
            else:
                break
        return count

    def _size(self, queue):
        # Queue size is not tracked for this transport; always report 0.
        return 0

    def _open(self):
        """Create and connect a new beanstalkc connection from the
        transport's connection parameters."""
        conninfo = self.connection.client
        host = conninfo.hostname or 'localhost'
        port = conninfo.port or DEFAULT_PORT
        conn = beanstalkc.Connection(host=host, port=port)
        conn.connect()
        return conn

    def close(self):
        """Close the beanstalk connection if one was opened."""
        if self._client is not None:
            # NOTE(review): returning here skips super().close() when a
            # client exists -- confirm that is intended.
            return self._client.close()
        super(Channel, self).close()

    @property
    def client(self):
        """Cached beanstalkc connection, established on first use."""
        if self._client is None:
            self._client = self._open()
        return self._client
class Transport(virtual.Transport):
    """Beanstalk virtual transport for kombu."""

    Channel = Channel

    polling_interval = 1
    default_port = DEFAULT_PORT
    # NOTE(review): when beanstalkc is absent the getattr() calls below
    # yield None entries in these tuples; __init__ refuses to construct
    # the transport in that case, so they should never be matched.
    connection_errors = (
        virtual.Transport.connection_errors + (
            socket.error, IOError,
            getattr(beanstalkc, 'SocketError', None),
        )
    )
    channel_errors = (
        virtual.Transport.channel_errors + (
            socket.error, IOError,
            getattr(beanstalkc, 'SocketError', None),
            getattr(beanstalkc, 'BeanstalkcException', None),
        )
    )
    driver_type = 'beanstalk'
    driver_name = 'beanstalkc'

    def __init__(self, *args, **kwargs):
        """Fail fast with an install hint when beanstalkc is missing."""
        if beanstalkc is None:
            raise ImportError(
                'Missing beanstalkc library (pip install beanstalkc)')
        super(Transport, self).__init__(*args, **kwargs)

    def driver_version(self):
        """Return the version string of the underlying beanstalkc library."""
        return beanstalkc.__version__
| bsd-3-clause |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/cmd.py | 167 | 14860 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
    """A simple framework for writing line-oriented command interpreters.
    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.
    A Cmd instance or subclass instance is a line-oriented interpreter
    framework. There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.
    """
    # Class-level defaults; subclasses commonly override these.
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1
    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.
        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.
        """
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        if self.use_rawinput and self.completekey:
            # Install our completer on the configured key, remembering the
            # previous completer so it can be restored on exit.
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    # Queued commands take precedence over interactive input.
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # readline() returns '' only at end of file.
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                # Restore the completer that was active before cmdloop ran.
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.
        """
        return line
    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop
    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass
    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.
        """
        pass
    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is shorthand for the 'help' command.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is shorthand for 'shell', but only when do_shell exists.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line
    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.
        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.
        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if line == 'EOF' :
            # EOF must not be repeated by a subsequent empty line.
            self.lastcmd = ''
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)
    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.
        If this method is not overridden, it repeats the last nonempty
        command entered.
        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)
    def default(self, line):
        """Called on an input line when the command prefix is not recognized.
        If this method is not overridden, it prints an error message and
        returns.
        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)
    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.
        By default, it returns an empty list.
        """
        return []
    def completenames(self, text, *ignored):
        """Return the command names that start with *text*."""
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]
    def complete(self, text, state):
        """Return the next possible completion for 'text'.
        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            # Translate readline's buffer offsets into offsets on the
            # left-stripped line.
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # Completing the command word itself.
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None
    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)
    def complete_help(self, *args):
        """Complete 'help' arguments from command names and help_* topics."""
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)
    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                # No help_<arg> method: fall back to the do_<arg> docstring.
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header, cmds_doc, 15,80)
            self.print_topics(self.misc_header, list(help.keys()),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)
    def print_topics(self, header, cmds, cmdlen, maxcol):
        """Print a help section: header, optional ruler line, columnized
        command list."""
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")
    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.
        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return
        nonstrings = [i for i in range(len(list))
                      if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError("list[i] not a string for i in %s"
                            % ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        # (take the first row count whose layout fits in displaywidth).
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            # Nothing fit: degrade to a single column.
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n"%str("  ".join(texts)))
| lgpl-3.0 |
pkats15/hdt_analyzer | django_test/django_venv/Lib/site-packages/pip/operations/freeze.py | 284 | 3984 | from __future__ import absolute_import
import logging
import re
import pip
from pip.compat import stdlib_pkgs
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
logger = logging.getLogger(__name__)
# packages to exclude from freeze output
freeze_excludes = stdlib_pkgs + ['setuptools', 'pip', 'distribute']
def freeze(
        requirement=None,
        find_links=None, local_only=None, user_only=None, skip_regex=None,
        find_tags=False,
        default_vcs=None,
        isolated=False,
        wheel_cache=None):
    """Yield the lines of ``pip freeze``-style output.

    First emits ``-f`` lines for *find_links*, then pinned requirement
    lines for installed distributions.  When *requirement* names a
    requirements file, that file's ordering, comments and option lines
    are preserved; packages it already lists are yielded in place, and
    the remainder is appended under an "added by pip freeze" banner.
    """
    find_links = find_links or []
    skip_match = None
    if skip_regex:
        skip_match = re.compile(skip_regex)
    dependency_links = []
    # Collect dependency links advertised by installed distributions and
    # by any find-links URL that pins a specific egg.
    for dist in pkg_resources.working_set:
        if dist.has_metadata('dependency_links.txt'):
            dependency_links.extend(
                dist.get_metadata_lines('dependency_links.txt')
            )
    for link in find_links:
        if '#egg=' in link:
            dependency_links.append(link)
    for link in find_links:
        yield '-f %s' % link
    installations = {}
    # Map installed project name -> FrozenRequirement (excluding stdlib,
    # pip/setuptools/distribute via freeze_excludes).
    for dist in get_installed_distributions(local_only=local_only,
                                            skip=freeze_excludes,
                                            user_only=user_only):
        req = pip.FrozenRequirement.from_dist(
            dist,
            dependency_links,
            find_tags=find_tags,
        )
        installations[req.name] = req
    if requirement:
        with open(requirement) as req_file:
            for line in req_file:
                if (not line.strip() or
                        line.strip().startswith('#') or
                        (skip_match and skip_match.search(line)) or
                        line.startswith((
                            '-r', '--requirement',
                            '-Z', '--always-unzip',
                            '-f', '--find-links',
                            '-i', '--index-url',
                            '--extra-index-url'))):
                    # Blanks, comments, skipped and option lines pass
                    # through verbatim.
                    yield line.rstrip()
                    continue
                if line.startswith('-e') or line.startswith('--editable'):
                    # Editable requirement: strip the flag before parsing.
                    if line.startswith('-e'):
                        line = line[2:].strip()
                    else:
                        line = line[len('--editable'):].strip().lstrip('=')
                    line_req = InstallRequirement.from_editable(
                        line,
                        default_vcs=default_vcs,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )
                else:
                    line_req = InstallRequirement.from_line(
                        line,
                        isolated=isolated,
                        wheel_cache=wheel_cache,
                    )
                if not line_req.name:
                    logger.info(
                        "Skipping line because it's not clear what it "
                        "would install: %s",
                        line.strip(),
                    )
                    logger.info(
                        "  (add #egg=PackageName to the URL to avoid"
                        " this warning)"
                    )
                elif line_req.name not in installations:
                    logger.warning(
                        "Requirement file contains %s, but that package is"
                        " not installed",
                        line.strip(),
                    )
                else:
                    # Emit the pinned form in the file's position and mark
                    # the package as already listed.
                    yield str(installations[line_req.name]).rstrip()
                    del installations[line_req.name]
        yield(
            '## The following requirements were added by '
            'pip freeze:'
        )
    # Whatever was not consumed above is installed but absent from the
    # requirements file (or no file was given): emit it sorted by name.
    for installation in sorted(
            installations.values(), key=lambda x: x.name.lower()):
        yield str(installation).rstrip()
| mit |
HurricaneLabs/cujo | src/cujo/http/resources/model.py | 1 | 3580 | import json
import re
from bson.objectid import ObjectId
from cerberus import Validator
from falcon import HTTPBadRequest
from ...schema.errors import InvalidDocumentError
from ..filter_expressions import FilterExpressions
from .base import BaseResource
class ModelResource(BaseResource):
	"""Falcon resource exposing bulk create/replace writes and
	query-string filter parsing over a schema-validated document
	collection."""

	# cerberus schema for bulk write payloads: the request body must
	# carry a list of documents under the "objects" key.
	create_replace_schema = {
		"objects":{"type":"list", "required":True}
	}
	def create_or_replace(self, req, resp, bulk_write_operation,
			document_collection, translate_validate_func,
			ordered=False, pre_save_func=None, post_save_func=None):
		"""Validate the parsed JSON body and bulk-write its "objects"
		into *document_collection*, echoing the write result back in the
		response body.

		Raises falcon.HTTPBadRequest when the body does not match
		``create_replace_schema``.
		"""
		body = req.context["json_body"].parsed
		post_validator = Validator(self.create_replace_schema)
		if not post_validator.validate(body):
			# Reject structurally invalid payloads, forwarding the
			# validator's per-field error details to the client.
			raise HTTPBadRequest("Error validating request", post_validator.errors)
		pre_save_context = self.get_pre_save_context(req, resp)
		post_save_context = self.get_post_save_context(req, resp)
		result, written, failed = document_collection.bulk_write(
			body["objects"],
			bulk_write_operation,
			translate_validate_func,
			ordered = ordered,
			pre_save_func = pre_save_func,
			pre_save_context = pre_save_context,
			post_save_func = post_save_func,
			post_save_context = post_save_context
		)
		response_body = {
			"objects":result
		}
		self.set_response_body(response_body, resp)
	def get_pre_save_context(self, req, resp):
		"""Context dict handed to pre-save hooks; override to extend."""
		return {"request":req, "response":resp}
	def get_post_save_context(self, req, resp):
		"""Context dict handed to post-save hooks; override to extend."""
		return {"request":req, "response":resp}
	def get_query_from_params(self, params, allowed_fields, translate_func,
			split_char="__", filter_prefix="filter_", filter_expressions=None):
		"""Build a MongoDB query dict from query-string filter params.

		Parameters of the form ``filter_<field>[__<subfield>...]__<expr>``
		are translated to core field names via the collection's schema;
		multiple resulting operations are combined with ``$and``.

		Raises ValueError for a malformed filter or a field that is not in
		*allowed_fields*.

		NOTE(review): *translate_func* is accepted but never used in this
		body -- confirm against callers.
		"""
		if filter_expressions is None:
			filter_expressions = FilterExpressions()
		filters = []
		for param_name, filter_value in params.items():
			if not param_name.startswith(filter_prefix):
				continue
			filter_chunks = param_name[len(filter_prefix):].split(split_char)
			if len(filter_chunks) < 2:
				# Need at least one field component plus an expression.
				raise ValueError("invalid filter: {0}".format(filter_chunks))
			nested_filter_field = self.document_collection.document_schema.translate_one_external_to_core(tuple(filter_chunks[:-1]))
			if nested_filter_field not in allowed_fields:
				raise ValueError(nested_filter_field)
			filter_field = ".".join(nested_filter_field)
			filter_expression = filter_chunks[-1:][0]
			filters.append((filter_field, filter_expression, filter_value))
		if not filters:
			return {}
		query_operations = []
		for filter_field, filter_expression, filter_value_iter in filters:
			filter_factory = filter_expressions.param_map[filter_expression]
			if not isinstance(filter_value_iter, list):
				# Repeated query params arrive as lists; normalize singles.
				filter_value_iter = [filter_value_iter]
			for filter_value in filter_value_iter:
				mongo_filter = filter_factory(filter_field, filter_value)
				query_op = {
					filter_field:mongo_filter
				}
				query_operations.append(query_op)
		if len(query_operations) > 1:
			query = {
				"$and":query_operations
			}
			return query
		else:
			return query_operations[0]
| mit |
robinro/ansible | lib/ansible/modules/network/illumos/ipadm_if.py | 70 | 6210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipadm_if
short_description: Manage IP interfaces on Solaris/illumos systems.
description:
- Create, delete, enable or disable IP interfaces on Solaris/illumos
systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- IP interface name.
required: true
temporary:
description:
- Specifies that the IP interface is temporary. Temporary IP
interfaces do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Create or delete Solaris/illumos IP interfaces.
required: false
default: "present"
choices: [ "present", "absent", "enabled", "disabled" ]
'''
EXAMPLES = '''
# Create vnic0 interface
- ipadm_if:
name: vnic0
state: enabled
# Disable vnic0 interface
- ipadm_if:
name: vnic0
state: disabled
'''
RETURN = '''
name:
description: IP interface name
returned: always
type: string
sample: "vnic0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: persistence of a IP interface
returned: always
type: boolean
sample: "True"
'''
class IPInterface(object):
    """Thin wrapper around the illumos ipadm(1M) utility for managing IP
    interfaces (show-if / create-if / delete-if / enable-if / disable-if).
    """

    def __init__(self, module):
        self.module = module
        self.name = module.params['name']
        self.temporary = module.params['temporary']
        self.state = module.params['state']

    def _run_ipadm(self, subcommand, args):
        """Run ``ipadm <subcommand> <args...>`` and return (rc, out, err)."""
        ipadm = self.module.get_bin_path('ipadm', True)
        return self.module.run_command([ipadm, subcommand] + args)

    def _change_interface(self, subcommand, temporary):
        """Apply a state-changing subcommand; '-t' marks the change as
        temporary (not persisted across reboots)."""
        args = ['-t', self.name] if temporary else [self.name]
        return self._run_ipadm(subcommand, args)

    def interface_exists(self):
        """Return True when ipadm knows about the interface."""
        (rc, _, _) = self._run_ipadm('show-if', [self.name])
        return rc == 0

    def interface_is_disabled(self):
        """Return True when the interface state column reports 'disabled'.

        Fails the module when the state cannot be queried at all.
        """
        (rc, out, err) = self._run_ipadm('show-if', ['-o', 'state', self.name])
        if rc != 0:
            self.module.fail_json(name=self.name, rc=rc, msg=err)
        return 'disabled' in out

    def create_interface(self):
        return self._change_interface('create-if', self.temporary)

    def delete_interface(self):
        return self._change_interface('delete-if', self.temporary)

    def enable_interface(self):
        # enable-if/disable-if only operate on the running configuration.
        return self._change_interface('enable-if', True)

    def disable_interface(self):
        return self._change_interface('disable-if', True)
def main():
    """Module entry point: converge the interface to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            temporary=dict(default=False, type='bool'),
            state=dict(default='present',
                       choices=['absent', 'present', 'enabled', 'disabled']),
        ),
        supports_check_mode=True
    )

    interface = IPInterface(module)

    # rc stays None unless an ipadm command actually runs; that is what
    # drives the 'changed' flag at the end.
    rc, out, err = None, '', ''
    result = {
        'name': interface.name,
        'state': interface.state,
        'temporary': interface.temporary,
    }

    if interface.state == 'absent':
        if interface.interface_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            rc, out, err = interface.delete_interface()
            if rc != 0:
                module.fail_json(name=interface.name, msg=err, rc=rc)
    elif interface.state == 'present':
        if not interface.interface_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            rc, out, err = interface.create_interface()
            if rc is not None and rc != 0:
                module.fail_json(name=interface.name, msg=err, rc=rc)
    elif interface.state == 'enabled':
        if interface.interface_is_disabled():
            rc, out, err = interface.enable_interface()
            if rc is not None and rc != 0:
                module.fail_json(name=interface.name, msg=err, rc=rc)
    elif interface.state == 'disabled':
        if not interface.interface_is_disabled():
            rc, out, err = interface.disable_interface()
            if rc is not None and rc != 0:
                module.fail_json(name=interface.name, msg=err, rc=rc)

    result['changed'] = rc is not None
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Nirvedh/CoarseCoherence | src/mem/slicc/ast/VarExprAST.py | 31 | 2792 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
from slicc.symbols import Type, Var
class VarExprAST(ExprAST):
    """AST expression node referring to a variable by its identifier."""

    def __init__(self, slicc, var):
        super(VarExprAST, self).__init__(slicc)
        self._var = var

    def __repr__(self):
        return "[VarExprAST: %r]" % self._var

    @property
    def name(self):
        return str(self._var)

    @property
    def var(self):
        # Resolve the identifier against the current symbol table; a failed
        # lookup is reported through the standard AST error mechanism.
        resolved = self.symtab.find(self._var, Var)
        if not resolved:
            self.error("Unrecognized variable: %s", self._var)
        return resolved

    def assertType(self, type_ident):
        """Verify this variable has the type named by type_ident."""
        expected_type = self.symtab.find(type_ident, Type)
        if not expected_type:
            self.error("There must be a type '%s' declared in this scope",
                       type_ident)
        if self.var.type != expected_type:
            self.error("Incorrect type: " +
                       "'%s' is expected to be type '%s' not '%s'",
                       self.var.ident, expected_type, self.var.type)

    def generate(self, code):
        # Emit the variable's code with fixup temporarily disabled, then
        # restore the previous fixup state.
        saved_fix = code.nofix()
        code("${{self.var.code}}")
        code.fix(saved_fix)
        return self.var.type
| bsd-3-clause |
numenta/nupic | tests/integration/nupic/algorithms/tm_overlapping_sequences_test.py | 10 | 32242 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Overlapping sequences test
===========================
Test learning of sequences with shared (overlapping) subsequences.
Test 1 - Test with fast learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 2 - Test with slow learning, make sure PAM allows us to train with fewer
repeats of the training data.
Test 3 - Test with slow learning, some overlap in the patterns, and TM
thresholds of 80% of newSynapseCount
Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2
and 10 elements long.
"""
import numpy
import pprint
import random
import sys
import unittest2 as unittest
from optparse import OptionParser
from nupic.algorithms import fdrutilities as fdrutils
from nupic.algorithms.backtracking_tm import BacktrackingTM
from nupic.algorithms.backtracking_tm_cpp import BacktrackingTMCPP
from nupic.support.unittesthelpers import testcasebase
# Global test-configuration knobs; VERBOSITY, SEED and SHORT are
# overridden from the command line in the __main__ block below.
VERBOSITY = 0   # how chatty the unit tests should be
SEED = 35       # the random seed used throughout

# Whether to only run the short tests.
SHORT = True

# If set to 0 the CPP TM will not be tested
INCLUDE_CPP_TM = 1  # Also test with CPP TM
def printOneTrainingVector(x):
  """Render a pattern on one line: '1' for active bits, '.' for inactive."""
  chars = ['1' if bit != 0 else '.' for bit in x]
  print(''.join(chars))
def printAllTrainingSequences(trainingSequences, upTo = 99999):
  """Print every pattern of every training sequence, one compact line per
  pattern, with a banner identifying each sequence.

  NOTE(review): the upTo parameter is never referenced in the body and is
  currently a no-op.
  """
  for i,trainingSequence in enumerate(trainingSequences):
    print "============= Sequence",i,"================="
    for j,pattern in enumerate(trainingSequence):
      printOneTrainingVector(pattern)
def getSimplePatterns(numOnes, numPatterns, patternOverlap=0):
  """Generate a list of simple binary patterns.

  Each pattern is a float32 numpy vector with numOnes consecutive bits ON.
  Consecutive patterns share patternOverlap ON bits.

  Parameters:
  -----------------------------------------------------------------------
  numOnes:        Number of bits ON in each pattern
  numPatterns:    Number of unique patterns to generate
  patternOverlap: Number of bits of overlap between each successive pattern
  retval:         list of patterns
  """
  assert (patternOverlap < numOnes)

  # Each successive pattern introduces this many previously-unused bits.
  stride = numOnes - patternOverlap
  width = stride * numPatterns + patternOverlap

  patterns = []
  for patternIdx in xrange(numPatterns):
    vec = numpy.zeros(width, dtype='float32')
    firstBit = patternIdx * stride
    vec[firstBit:firstBit + numOnes] = 1
    patterns.append(vec)
  return patterns
def buildOverlappedSequences( numSequences = 2,
                              seqLen = 5,
                              sharedElements = [3,4],
                              numOnBitsPerPattern = 3,
                              patternOverlap = 0,
                              seqOverlap = 0,
                              **kwargs
                              ):
  """ Create training sequences that share some elements in the middle.

  Parameters:
  -----------------------------------------------------
  numSequences:         Number of unique training sequences to generate
  seqLen:               Overall length of each sequence
  sharedElements:       Which element indices of each sequence are shared. These
                          will be in the range between 0 and seqLen-1
  numOnBitsPerPattern:  Number of ON bits in each TM input pattern
  patternOverlap:       Max number of bits of overlap between any 2 patterns
  retval:               (numCols, trainingSequences)
                          numCols - width of the patterns
                          trainingSequences - a list of training sequences

  NOTE(review): the seqOverlap parameter is never referenced in the body.
  The mutable default for sharedElements is safe here because it is only
  read, never mutated.
  """

  # Total number of patterns used to build the sequences
  numSharedElements = len(sharedElements)
  numUniqueElements = seqLen - numSharedElements
  numPatterns = numSharedElements + numUniqueElements * numSequences

  # Create the table of patterns
  patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)

  # Total number of columns required
  numCols = len(patterns[0])

  # -----------------------------------------------------------------------
  # Create the training sequences
  trainingSequences = []

  # NOTE: relies on Python 2 range() returning a list (pop(0) below).
  uniquePatternIndices = range(numSharedElements, numPatterns)
  for i in xrange(numSequences):
    sequence = []

    # pattern indices [0 ... numSharedElements-1] are reserved for the shared
    # middle
    sharedPatternIndices = range(numSharedElements)

    # Build up the sequence
    for j in xrange(seqLen):
      if j in sharedElements:
        patIdx = sharedPatternIndices.pop(0)
      else:
        patIdx = uniquePatternIndices.pop(0)
      sequence.append(patterns[patIdx])

    trainingSequences.append(sequence)

  if VERBOSITY >= 3:
    print "\nTraining sequences"
    printAllTrainingSequences(trainingSequences)

  return (numCols, trainingSequences)
def buildSequencePool(numSequences = 10,
                      seqLen = [2,3,4],
                      numPatterns = 5,
                      numOnBitsPerPattern = 3,
                      patternOverlap = 0,
                      **kwargs
                      ):
  """ Create a bunch of sequences of various lengths, all built from
  a fixed set of patterns.

  Parameters:
  -----------------------------------------------------
  numSequences:         Number of training sequences to generate
  seqLen:               List of possible sequence lengths
  numPatterns:          How many possible patterns there are to use within
                          sequences
  numOnBitsPerPattern:  Number of ON bits in each TM input pattern
  patternOverlap:       Max number of bits of overlap between any 2 patterns
  retval:               (numCols, trainingSequences)
                          numCols - width of the patterns
                          trainingSequences - a list of training sequences

  NOTE: uses the module-level `random` generator, which is seeded from SEED
  in the __main__ block, so the pool is reproducible for a given seed.
  """

  # Create the table of patterns
  patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)

  # Total number of columns required
  numCols = len(patterns[0])

  # -----------------------------------------------------------------------
  # Create the training sequences
  trainingSequences = []
  for i in xrange(numSequences):

    # Build it up from patterns chosen uniformly at random
    sequence = []
    length = random.choice(seqLen)
    for j in xrange(length):
      patIdx = random.choice(xrange(numPatterns))
      sequence.append(patterns[patIdx])

    # Put it in
    trainingSequences.append(sequence)

  if VERBOSITY >= 3:
    print "\nTraining sequences"
    printAllTrainingSequences(trainingSequences)

  return (numCols, trainingSequences)
def createTMs(includeCPP = True,
              includePy = True,
              numCols = 100,
              cellsPerCol = 4,
              activationThreshold = 3,
              minThreshold = 3,
              newSynapseCount = 3,
              initialPerm = 0.6,
              permanenceInc = 0.1,
              permanenceDec = 0.0,
              globalDecay = 0.0,
              pamLength = 0,
              checkSynapseConsistency = True,
              maxInfBacktrack = 0,
              maxLrnBacktrack = 0,
              **kwargs
              ):

  """Create one or more TM instances, placing each into a dict keyed by
  name ('CPP' for the C++ implementation, 'PY ' for the Python one).

  All keyword arguments other than includeCPP/includePy are forwarded to
  the BacktrackingTMCPP / BacktrackingTM constructors.

  Parameters:
  ------------------------------------------------------------------
  retval:   tms - dict of TM instances
  """

  # Keep these fixed:
  connectedPerm = 0.5

  tms = dict()

  if includeCPP:
    if VERBOSITY >= 2:
      print "Creating BacktrackingTMCPP instance"

    cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,
                   initialPerm = initialPerm, connectedPerm = connectedPerm,
                   minThreshold = minThreshold, newSynapseCount = newSynapseCount,
                   permanenceInc = permanenceInc, permanenceDec = permanenceDec,
                   activationThreshold = activationThreshold,
                   globalDecay = globalDecay, burnIn = 1,
                   seed=SEED, verbosity=VERBOSITY,
                   checkSynapseConsistency = checkSynapseConsistency,
                   collectStats = True,
                   pamLength = pamLength,
                   maxInfBacktrack = maxInfBacktrack,
                   maxLrnBacktrack = maxLrnBacktrack,
                   )

    # Ensure we are copying over learning states for TMDiff
    cpp_tm.retrieveLearningStates = True

    tms['CPP'] = cpp_tm

  if includePy:
    if VERBOSITY >= 2:
      print "Creating PY TM instance"

    py_tm = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = cellsPerCol,
                   initialPerm = initialPerm, connectedPerm = connectedPerm,
                   minThreshold = minThreshold, newSynapseCount = newSynapseCount,
                   permanenceInc = permanenceInc, permanenceDec = permanenceDec,
                   activationThreshold = activationThreshold,
                   globalDecay = globalDecay, burnIn = 1,
                   seed=SEED, verbosity=VERBOSITY,
                   collectStats = True,
                   pamLength = pamLength,
                   maxInfBacktrack = maxInfBacktrack,
                   maxLrnBacktrack = maxLrnBacktrack,
                   )

    # NOTE: the 'PY ' key deliberately carries a trailing space so both
    # keys are the same width in verbose output.
    tms['PY '] = py_tm

  return tms
def assertNoTMDiffs(tms):
  """
  Check for diffs among the TM instances in the passed in tms dict and
  raise an assert if any are detected

  Parameters:
  ---------------------------------------------------------------------
  tms:                  dict of TM instances
  """

  if len(tms) == 1:
    return
  if len(tms) > 2:
    # BUGFIX: this used to `raise` a bare string, which is illegal in
    # Python >= 2.6 and produced a TypeError instead of the intended
    # message. Use a real exception type.
    raise NotImplementedError("Not implemented for more than 2 TMs")

  same = fdrutils.tmDiff2(*tms.values(), verbosity=VERBOSITY)
  assert(same)
  return
def evalSequences(tms,
trainingSequences,
testSequences = None,
nTrainRepetitions = 1,
doResets = True,
**kwargs):
"""Train the TMs on the entire training set for nTrainRepetitions in a row.
Then run the test set through inference once and return the inference stats.
Parameters:
---------------------------------------------------------------------
tms: dict of TM instances
trainingSequences: list of training sequences. Each sequence is a list
of TM input patterns
testSequences: list of test sequences. If None, we will test against
the trainingSequences
nTrainRepetitions: Number of times to run the training set through the TM
doResets: If true, send a reset to the TM between each sequence
"""
# If no test sequence is specified, use the first training sequence
if testSequences == None:
testSequences = trainingSequences
# First TM instance is used by default for verbose printing of input values,
# etc.
firstTP = tms.values()[0]
assertNoTMDiffs(tms)
# =====================================================================
# Loop through the training set nTrainRepetitions times
# ==========================================================================
for trainingNum in xrange(nTrainRepetitions):
if VERBOSITY >= 2:
print "\n##############################################################"
print "################# Training round #%d of %d #################" \
% (trainingNum, nTrainRepetitions)
for (name,tm) in tms.iteritems():
print "TM parameters for %s: " % (name)
print "---------------------"
tm.printParameters()
print
# ======================================================================
# Loop through the sequences in the training set
numSequences = len(testSequences)
for sequenceNum, trainingSequence in enumerate(trainingSequences):
numTimeSteps = len(trainingSequence)
if VERBOSITY >= 2:
print "\n================= Sequence #%d of %d ================" \
% (sequenceNum, numSequences)
if doResets:
for tm in tms.itervalues():
tm.reset()
# --------------------------------------------------------------------
# Train each element of the sequence
for t, x in enumerate(trainingSequence):
# Print Verbose info about this element
if VERBOSITY >= 2:
print
if VERBOSITY >= 3:
print "------------------------------------------------------------"
print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
% (sequenceNum, numSequences, t, numTimeSteps)
firstTP.printInput(x)
print "input nzs:", x.nonzero()
# Train in this element
x = numpy.array(x).astype('float32')
for tm in tms.itervalues():
tm.learn(x, enableInference=True)
# Print the input and output states
if VERBOSITY >= 3:
for (name,tm) in tms.iteritems():
print "I/O states of %s TM:" % (name)
print "-------------------------------------",
tm.printStates(printPrevious = (VERBOSITY >= 5))
print
assertNoTMDiffs(tms)
# Print out number of columns that weren't predicted
if VERBOSITY >= 2:
for (name,tm) in tms.iteritems():
stats = tm.getStats()
print "# of unpredicted columns for %s TM: %d of %d" \
% (name, stats['curMissing'], x.sum())
numBurstingCols = tm.infActiveState['t'].min(axis=1).sum()
print "# of bursting columns for %s TM: %d of %d" \
% (name, numBurstingCols, x.sum())
# Print the trained cells
if VERBOSITY >= 4:
print "Sequence %d finished." % (sequenceNum)
for (name,tm) in tms.iteritems():
print "All cells of %s TM:" % (name)
print "-------------------------------------",
tm.printCells()
print
# --------------------------------------------------------------------
# Done training all sequences in this round, print the total number of
# missing, extra columns and make sure it's the same among the TMs
if VERBOSITY >= 2:
print
prevResult = None
for (name,tm) in tms.iteritems():
stats = tm.getStats()
if VERBOSITY >= 1:
print "Stats for %s TM over all sequences for training round #%d of %d:" \
% (name, trainingNum, nTrainRepetitions)
print " total missing:", stats['totalMissing']
print " total extra:", stats['totalExtra']
if prevResult is None:
prevResult = (stats['totalMissing'], stats['totalExtra'])
else:
assert (stats['totalMissing'] == prevResult[0])
assert (stats['totalExtra'] == prevResult[1])
tm.resetStats()
# =====================================================================
# Finish up learning
if VERBOSITY >= 3:
print "Calling trim segments"
prevResult = None
for tm in tms.itervalues():
nSegsRemoved, nSynsRemoved = tm.trimSegments()
if prevResult is None:
prevResult = (nSegsRemoved, nSynsRemoved)
else:
assert (nSegsRemoved == prevResult[0])
assert (nSynsRemoved == prevResult[1])
assertNoTMDiffs(tms)
if VERBOSITY >= 4:
print "Training completed. Complete state:"
for (name,tm) in tms.iteritems():
print "%s:" % (name)
tm.printCells()
print
# ==========================================================================
# Infer
# ==========================================================================
if VERBOSITY >= 2:
print "\n##############################################################"
print "########################## Inference #########################"
# Reset stats in all TMs
for tm in tms.itervalues():
tm.resetStats()
# -------------------------------------------------------------------
# Loop through the test sequences
numSequences = len(testSequences)
for sequenceNum, testSequence in enumerate(testSequences):
numTimeSteps = len(testSequence)
# Identify this sequence
if VERBOSITY >= 2:
print "\n================= Sequence %d of %d ================" \
% (sequenceNum, numSequences)
# Send in the rest
if doResets:
for tm in tms.itervalues():
tm.reset()
# -------------------------------------------------------------------
# Loop through the elements of this sequence
for t,x in enumerate(testSequence):
# Print verbose info about this element
if VERBOSITY >= 2:
print
if VERBOSITY >= 3:
print "------------------------------------------------------------"
print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \
% (sequenceNum, numSequences, t, numTimeSteps)
firstTP.printInput(x)
print "input nzs:", x.nonzero()
# Infer on this element
for tm in tms.itervalues():
tm.infer(x)
assertNoTMDiffs(tms)
# Print out number of columns that weren't predicted
if VERBOSITY >= 2:
for (name,tm) in tms.iteritems():
stats = tm.getStats()
print "# of unpredicted columns for %s TM: %d of %d" \
% (name, stats['curMissing'], x.sum())
# Debug print of internal state
if VERBOSITY >= 3:
for (name,tm) in tms.iteritems():
print "I/O states of %s TM:" % (name)
print "-------------------------------------",
tm.printStates(printPrevious = (VERBOSITY >= 5),
printLearnState = False)
print
# Done with this sequence
# Debug print of all stats of the TMs
if VERBOSITY >= 4:
print
for (name,tm) in tms.iteritems():
print "Interim internal stats for %s TM:" % (name)
print "---------------------------------"
pprint.pprint(tm.getStats())
print
if VERBOSITY >= 2:
print "\n##############################################################"
print "####################### Inference Done #######################"
# Get the overall stats for each TM and return them
tpStats = dict()
for (name,tm) in tms.iteritems():
tpStats[name] = stats = tm.getStats()
if VERBOSITY >= 2:
print "Stats for %s TM over all sequences:" % (name)
print " total missing:", stats['totalMissing']
print " total extra:", stats['totalExtra']
for (name,tm) in tms.iteritems():
if VERBOSITY >= 3:
print "\nAll internal stats for %s TM:" % (name)
print "-------------------------------------",
pprint.pprint(tpStats[name])
print
return tpStats
def _testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods):
  """
  Build up a set of sequences, create the TM(s), train them, test them,
  and check that we got the expected number of missing predictions during
  inference.

  Parameters:
  -----------------------------------------------------------------------
  baseParams:      dict of all of the parameters for building sequences,
                     creating the TMs, and training and testing them. This
                     gets updated from 'mods' before we use it.
  expMissingMin:   Minimum number of expected missing predictions during testing.
                     Pass None to disable the lower-bound check.
  expMissingMax:   Maximum number of expected missing predictions during testing.
                     Pass None to disable the upper-bound check.
  mods:            dict of modifications to make to the baseParams.
  retval:          True on success (an AssertionError is raised otherwise).
  """

  # Update the base with the modifications
  params = dict(baseParams)
  params.update(mods)

  # --------------------------------------------------------------------
  # Create the sequences
  func = params['seqFunction']
  (numCols, trainingSequences) = func(**params)

  # --------------------------------------------------------------------
  # Create the TMs; the column count comes from the generated sequences
  # unless the caller pinned it explicitly.
  if params['numCols'] is None:
    params['numCols'] = numCols
  tps = createTMs(**params)

  # --------------------------------------------------------------------
  # Train and get test results
  tpStats = evalSequences(tms= tps,
                          trainingSequences=trainingSequences,
                          testSequences=None,
                          **params)

  # -----------------------------------------------------------------------
  # Make sure there are the expected number of missing predictions
  for (name, stats) in tpStats.iteritems():
    print "Detected %d missing predictions overall during inference" \
          % (stats['totalMissing'])
    if expMissingMin is not None and stats['totalMissing'] < expMissingMin:
      print "FAILURE: Expected at least %d total missing but got %d" \
            % (expMissingMin, stats['totalMissing'])
      assert False
    if expMissingMax is not None and stats['totalMissing'] > expMissingMax:
      print "FAILURE: Expected at most %d total missing but got %d" \
            % (expMissingMax, stats['totalMissing'])
      assert False

  return True
class TMOverlappingSeqsTest(testcasebase.TestCaseBase):
  """Tests of sequence learning with shared (overlapping) subsequences,
  comparing training with and without PAM at fast and slow learning rates."""

  def testFastLearning(self):
    """
    Test with fast learning, make sure PAM allows us to train with fewer
    repeats of the training data.
    """

    numOnBitsPerPattern = 3

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,              # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.6,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, with 3 repetitions, still missing predictions
    print "\nRunning without PAM, 3 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=20,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=3))

    # With PAM, with only 3 repetitions, 0 missing predictions
    print "\nRunning with PAM, 3 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=5,
                                nTrainRepetitions=3))

  def testSlowLearning(self):
    """
    Test with slow learning, make sure PAM allows us to train with fewer
    repeats of the training data.
    """

    numOnBitsPerPattern = 3

    # ================================================================
    # Base params (initialPerm is low, so learning is slow)
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,              # filled in based on generated sequences
      activationThreshold = numOnBitsPerPattern,
      minThreshold = numOnBitsPerPattern,
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, requires 40 repetitions
    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=10))

    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=6,
                                nTrainRepetitions=10))

  def testSlowLearningWithOverlap(self):
    """
    Test with slow learning, some overlap in the patterns, and TM thresholds
    of 80% of newSynapseCount

    Make sure PAM allows us to train with fewer repeats of the training data.
    """

    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")

    numOnBitsPerPattern = 5

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildOverlappedSequences,
      numSequences = 2,
      seqLen = 10,
      sharedElements = [2,3],
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 2,

      # TM construction; thresholds at 80% of newSynapseCount
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,              # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.11,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # No PAM, with 10 repetitions, still missing predictions
    print "\nRunning without PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=10,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=10))

    # With PAM, with only 10 repetitions, 0 missing predictions
    print "\nRunning with PAM, 10 repetitions of the training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=6,
                                nTrainRepetitions=10))

  def testForbesLikeData(self):
    """
    Test with "Forbes-like" data. A bunch of sequences of lengths between 2
    and 10 elements long.

    We will test with both fast and slow learning.

    Make sure PAM allows us to train with fewer repeats of the training data.
    """

    # Cannot use skipIf decorator because it reads SHORT before it is set.
    if SHORT:
      self.skipTest("Test skipped by default. Enable with --long.")

    numOnBitsPerPattern = 3

    # ================================================================
    # Base params
    baseParams = dict(
      # Sequence generation
      seqFunction = buildSequencePool,
      numSequences = 20,
      seqLen = [3,10],
      numPatterns = 10,
      numOnBitsPerPattern = numOnBitsPerPattern,
      patternOverlap = 1,

      # TM construction
      includeCPP = INCLUDE_CPP_TM,
      numCols = None,              # filled in based on generated sequences
      activationThreshold = int(0.8 * numOnBitsPerPattern),
      minThreshold = int(0.8 * numOnBitsPerPattern),
      newSynapseCount = numOnBitsPerPattern,
      initialPerm = 0.51,
      permanenceInc = 0.1,
      permanenceDec = 0.0,
      globalDecay = 0.0,
      pamLength = 0,
      checkSynapseConsistency = False,

      # Training/testing
      nTrainRepetitions = 8,
      doResets = True,
      )

    # ================================================================
    # Run various configs

    # Fast mode, no PAM
    print "\nRunning without PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=50,
                                expMissingMax=None, pamLength=1,
                                nTrainRepetitions=2))

    # Fast mode, with PAM
    print "\nRunning with PAM, fast learning, 2 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, pamLength=5,
                                nTrainRepetitions=2))

    # Slow mode, no PAM
    print "\nRunning without PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=1,
                                expMissingMax=None, initialPerm=0.31,
                                pamLength=1, nTrainRepetitions=8))

    # Slow mode, with PAM
    print "\nRunning with PAM, slow learning, 8 repetitions of the " \
          "training data..."
    self.assertTrue(_testConfig(baseParams=baseParams, expMissingMin=0,
                                expMissingMax=0, initialPerm=0.31, pamLength=5,
                                nTrainRepetitions=8))
if __name__=="__main__":

  # Process command line arguments; these override the module-level
  # VERBOSITY / SEED / SHORT defaults before the tests run.
  parser = OptionParser()
  parser.add_option(
      "--verbosity", default=VERBOSITY, type="int",
      help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
  parser.add_option("--seed", default=SEED, type="int",
      help="Random seed to use [default: %default].")
  parser.add_option("--short", action="store_true", default=True,
      help="Run short version of the tests [default: %default].")
  parser.add_option("--long", action="store_true", default=False,
      help="Run long version of the tests [default: %default].")

  (options, args) = parser.parse_args()
  SEED = options.seed
  VERBOSITY = options.verbosity
  SHORT = not options.long

  # Seed the random number generators
  rgen = numpy.random.RandomState(SEED)
  random.seed(SEED)

  if not INCLUDE_CPP_TM:
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    print "!!  WARNING: C++ TM testing is DISABLED until it can be updated."
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"

  # Form the command line for the unit test framework.
  args = [sys.argv[0]] + args
  unittest.main(argv=args, verbosity=VERBOSITY)
| agpl-3.0 |
hrh5775/LibraryManager | PythonTestClient/LibraryManagerTestClient/venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 360 | 18615 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
    """Incremental decoder for 'deflate' response bodies.

    Some servers send raw DEFLATE data instead of the RFC-mandated
    zlib-wrapped stream. We optimistically try zlib first and, if the first
    chunk fails to decode, transparently restart with a raw-deflate
    decompressor, replaying all bytes seen so far.
    """

    def __init__(self):
        self._first_try = True
        # Buffer of everything fed in so far; kept only until the stream
        # format has been determined, then set to None.
        # Uses a plain b'' literal instead of six's binary_type() — they
        # are equivalent on every supported Python.
        self._data = b''
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate attributes such as .unused_data to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        if not data:
            return data
        if not self._first_try:
            return self._obj.decompress(data)
        return self._sniff_and_decompress(data)

    def _sniff_and_decompress(self, data):
        # First chunk(s): assume a zlib-wrapped stream; on zlib.error fall
        # back to raw deflate and replay the buffered bytes.
        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):
    """Stateful decoder for gzip-encoded response bodies."""

    def __init__(self):
        # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Anything not defined here (e.g. .unused_data) comes from the
        # underlying zlib decompressor object.
        return getattr(self._obj, name)

    def decompress(self, data):
        # Empty chunks pass through unchanged; everything else is inflated.
        return self._obj.decompress(data) if data else data
def _get_decoder(mode):
    """Return a fresh decoder instance for the given content-encoding token."""
    decoder_cls = GzipDecoder if mode == 'gzip' else DeflateDecoder
    return decoder_cls()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.
    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.
    Extra parameters for behaviour not present in httplib.HTTPResponse:
    :param preload_content:
        If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, will attempt to decode specific content-encoding's based on
        headers (like 'gzip' and 'deflate'); if False, raw data is returned
        instead.
    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """
    # Content-encodings this class knows how to decode (see _get_decoder).
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        # A str/bytes body is stored directly; a file-like body is consumed
        # lazily through self._fp.
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def get_redirect_location(self):
        """
        Should we redirect and where to?
        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False
    def release_conn(self):
        # Return the underlying connection to its pool (if both exist) so it
        # can be reused for other requests.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None
    @property
    def data(self):
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
    @property
    def connection(self):
        return self._connection
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def _init_decoder(self):
        """
        Set-up the _decoder attribute if necessary.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)
    def _decode(self, data, decode_content, flush_decoder):
        """
        Decode the data passed in and potentially flush the decoder.
        """
        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            content_encoding = self.headers.get('content-encoding', '').lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)
        if flush_decoder and decode_content:
            data += self._flush_decoder()
        return data
    def _flush_decoder(self):
        """
        Flushes the decoder. Should only be called if the decoder is actually
        being used.
        """
        if self._decoder:
            buf = self._decoder.decompress(b'')
            return buf + self._decoder.flush()
        return b''
    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.
        On exit, release the connection back to the pool.
        """
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()
                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()
            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return
        flush_decoder = False
        data = None
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
        if data:
            self._fp_bytes_read += len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data
    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.
        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked:
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers
    def getheader(self, name, default=None):
        return self.headers.get(name, default)
    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()
        if self._connection:
            self._connection.close()
    @property
    def closed(self):
        # Treat "no body file" and "unknowable" as closed.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True
    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        return True
    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        line = self._fp.fp.readline()
        # Chunk size may carry a ';extension' suffix; only the hex size counts.
        line = line.split(b';', 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)
    def _handle_chunk(self, amt):
        # Read up to `amt` bytes (or the whole chunk when amt is None) from the
        # current chunk, consuming the trailing CRLF when the chunk is finished.
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked("Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")
        # Don't bother reading the body of a HEAD request.
        if self._original_response and is_response_to_head(self._original_response):
            self._original_response.close()
            return
        with self._error_catcher():
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(chunk, decode_content=decode_content,
                                       flush_decoder=False)
                if decoded:
                    yield decoded
            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded
            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b'\r\n':
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
| gpl-3.0 |
saurabh6790/pow-lib | webnotes/model/bean.py | 31 | 14413 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Transactions are defined as collection of classes, a Bean represents collection of Document
objects for a transaction with main and children.
Group actions like save, etc are performed on doclists
"""
import webnotes
from webnotes import _, msgprint
from webnotes.utils import cint, cstr, flt
from webnotes.model.doc import Document
# Optional app-level hook: if the deployed app ships startup.bean_handlers,
# its on_method() is invoked after every Bean controller method (see notify()).
# Absence of the module is a supported configuration, hence the fallback.
try:
    from startup.bean_handlers import on_method
except ImportError:
    on_method = None
# Bean-specific exceptions; all subclass webnotes.ValidationError so generic
# validation error handling elsewhere still catches them.
class DocstatusTransitionError(webnotes.ValidationError): pass
class BeanPermissionError(webnotes.ValidationError): pass
class TimestampMismatchError(webnotes.ValidationError): pass
class Bean:
    """
    Collection of Documents with one parent and multiple children.

    Wraps a "doclist" (parent Document first, child Documents after) and
    provides the transaction-level operations: save, submit, cancel, etc.
    """
    def __init__(self, dt=None, dn=None):
        # Flexible constructor: Bean(doctype, name) loads from the database;
        # Bean(name) treats dt as both doctype and name (Single doctypes);
        # Bean(list_or_dict) wraps already-built document data.
        self.obj = None
        self.ignore_permissions = False
        self.ignore_children_type = []
        self.ignore_links = False
        self.ignore_validate = False
        self.ignore_fields = False
        self.ignore_mandatory = False
        if isinstance(dt, basestring) and not dn:
            dn = dt
        if dt and dn:
            self.load_from_db(dt, dn)
        elif isinstance(dt, list):
            self.set_doclist(dt)
        elif isinstance(dt, dict):
            self.set_doclist([dt])
    def load_from_db(self, dt=None, dn=None):
        """
        Load doclist (parent followed by all children) from the database.
        """
        from webnotes.model.doc import getchildren
        if not dt: dt = self.doc.doctype
        if not dn: dn = self.doc.name
        doc = Document(dt, dn)
        # get all children types
        tablefields = webnotes.model.meta.get_table_fields(dt)
        # load children
        doclist = webnotes.doclist([doc,])
        for t in tablefields:
            doclist += getchildren(doc.name, t[0], t[1], dt)
        self.set_doclist(doclist)
        if dt == dn:
            # dt == dn implies a Single doctype; cast its field values.
            self.convert_type(self.doc)
    def __iter__(self):
        return self.doclist.__iter__()
    @property
    def meta(self):
        # Lazily loaded and cached doctype metadata.
        if not hasattr(self, "_meta"):
            self._meta = webnotes.get_doctype(self.doc.doctype)
        return self._meta
    def from_compressed(self, data, docname):
        # Build the doclist from the compressed wire format used by the client.
        from webnotes.model.utils import expand
        self.set_doclist(expand(data))
    def set_doclist(self, doclist):
        # Normalize plain dicts to Document objects and keep self.doc pointing
        # at the parent (first) document. Also refresh the controller's view.
        for i, d in enumerate(doclist):
            if isinstance(d, dict):
                doclist[i] = Document(fielddata=d)
        self.doclist = webnotes.doclist(doclist)
        self.doc = self.doclist[0]
        if self.obj:
            self.obj.doclist = self.doclist
            self.obj.doc = self.doc
    def make_controller(self):
        """Create (or refresh) the server-side controller object for this bean."""
        if self.obj:
            # update doclist before running any method
            self.obj.doclist = self.doclist
            return self.obj
        self.obj = webnotes.get_obj(doc=self.doc, doclist=self.doclist)
        self.obj.bean = self
        self.controller = self.obj
        return self.obj
    def get_controller(self):
        return self.make_controller()
    def to_dict(self):
        # Plain list-of-dicts representation (e.g. for JSON serialization).
        return [d.fields for d in self.doclist]
    def check_if_latest(self, method="save"):
        """Raise TimestampMismatchError if the DB copy changed since load.

        Also validates the docstatus transition for non-Single doctypes.
        """
        from webnotes.model.meta import is_single
        conflict = False
        if not cint(self.doc.fields.get('__islocal')):
            if is_single(self.doc.doctype):
                modified = webnotes.conn.get_value(self.doc.doctype, self.doc.name, "modified")
                if isinstance(modified, list):
                    modified = modified[0]
                if cstr(modified) and cstr(modified) != cstr(self.doc.modified):
                    conflict = True
            else:
                # "for update" takes a row lock for the rest of the transaction.
                tmp = webnotes.conn.sql("""select modified, docstatus from `tab%s`
                    where name="%s" for update"""
                    % (self.doc.doctype, self.doc.name), as_dict=True)
                if not tmp:
                    webnotes.msgprint("""This record does not exist. Please refresh.""", raise_exception=1)
                modified = cstr(tmp[0].modified)
                if modified and modified != cstr(self.doc.modified):
                    conflict = True
                self.check_docstatus_transition(tmp[0].docstatus, method)
        if conflict:
            webnotes.msgprint(_("Error: Document has been modified after you have opened it") \
                + (" (%s, %s). " % (modified, self.doc.modified)) \
                + _("Please refresh to get the latest document."), raise_exception=TimestampMismatchError)
    def check_docstatus_transition(self, db_docstatus, method):
        # Allowed [from, to] docstatus pairs per operation:
        # 0=Draft, 1=Submitted, 2=Cancelled.
        valid = {
            "save": [0,0],
            "submit": [0,1],
            "cancel": [1,2],
            "update_after_submit": [1,1]
        }
        labels = {
            0: _("Draft"),
            1: _("Submitted"),
            2: _("Cancelled")
        }
        if not hasattr(self, "to_docstatus"):
            self.to_docstatus = 0
        if method != "runserverobj" and [db_docstatus, self.to_docstatus] != valid[method]:
            webnotes.msgprint(_("Cannot change from") + ": " + labels[db_docstatus] + " > " + \
                labels[self.to_docstatus], raise_exception=DocstatusTransitionError)
    def check_links(self):
        """Validate all Link fields across the doclist; abort save on failure."""
        if self.ignore_links:
            return
        ref, err_list = {}, []
        for d in self.doclist:
            if not ref.get(d.doctype):
                # Cache the link-field list per doctype to avoid rebuilding it.
                ref[d.doctype] = d.make_link_list()
            err_list += d.validate_links(ref[d.doctype])
        if err_list:
            webnotes.msgprint("""[Link Validation] Could not find the following values: %s.
                Please correct and resave. Document Not Saved.""" % ', '.join(err_list), raise_exception=1)
    def update_timestamps_and_docstatus(self):
        """Stamp owner/creation/modified fields and roll docstatus forward."""
        from webnotes.utils import now
        ts = now()
        user = webnotes.__dict__.get('session', {}).get('user') or 'Administrator'
        for d in self.doclist:
            if self.doc.fields.get('__islocal'):
                if not d.owner:
                    d.owner = user
                if not d.creation:
                    d.creation = ts
            d.modified_by = user
            d.modified = ts
            if d.docstatus != 2 and self.to_docstatus >= int(d.docstatus):  # don't update deleted
                d.docstatus = self.to_docstatus
    def prepare_for_save(self, method):
        """Common pre-write pipeline shared by save/submit/cancel/update."""
        self.check_if_latest(method)
        self.update_timestamps_and_docstatus()
        self.update_parent_info()
        if self.doc.fields.get("__islocal"):
            # set name before validate
            self.doc.set_new_name(self.get_controller())
            self.run_method('before_insert')
        if method != "cancel":
            self.extract_images_from_text_editor()
            self.check_links()
    def update_parent_info(self):
        # Ensure every child row carries correct parent linkage (parent,
        # parenttype, parentfield) and a sequential idx within its table.
        idx_map = {}
        is_local = cint(self.doc.fields.get("__islocal"))
        if not webnotes.flags.in_import:
            parentfields = [d.fieldname for d in self.meta.get({"doctype": "DocField", "fieldtype": "Table"})]
        for i, d in enumerate(self.doclist[1:]):
            if d.parentfield:
                if not webnotes.flags.in_import:
                    if not d.parentfield in parentfields:
                        webnotes.msgprint("Bad parentfield %s" % d.parentfield,
                            raise_exception=True)
                d.parenttype = self.doc.doctype
                d.parent = self.doc.name
            if not d.idx:
                d.idx = idx_map.setdefault(d.parentfield, 0) + 1
            else:
                d.idx = cint(d.idx)
            if is_local:
                # if parent is new, all children should be new
                d.fields["__islocal"] = 1
                d.name = None
            idx_map[d.parentfield] = d.idx
    def run_method(self, method, *args, **kwargs):
        """Run a controller hook (and its custom_ override), then notify."""
        self.make_controller()
        if hasattr(self.controller, method):
            getattr(self.controller, method)(*args, **kwargs)
        if hasattr(self.controller, 'custom_' + method):
            getattr(self.controller, 'custom_' + method)(*args, **kwargs)
        notify(self, method)
        # The controller may have replaced its doclist; sync it back.
        self.set_doclist(self.controller.doclist)
    def get_method(self, method):
        self.make_controller()
        return getattr(self.controller, method, None)
    def insert(self):
        """Mark the bean as new, apply defaults and save it."""
        self.doc.fields["__islocal"] = 1
        self.set_defaults()
        if webnotes.flags.in_test:
            if self.meta.get_field("naming_series"):
                self.doc.naming_series = "_T-" + self.doc.doctype + "-"
        return self.save()
    def insert_or_update(self):
        # Upsert: save when the record already exists, insert otherwise.
        if self.doc.name and webnotes.conn.exists(self.doc.doctype, self.doc.name):
            return self.save()
        else:
            return self.insert()
    def set_defaults(self):
        """Overlay each document on a freshly-defaulted document of its type."""
        if webnotes.flags.in_import:
            return
        new_docs = {}
        new_doclist = []
        for d in self.doclist:
            if not d.doctype in new_docs:
                new_docs[d.doctype] = webnotes.new_doc(d.doctype)
            newd = webnotes.doc(new_docs[d.doctype].fields.copy())
            newd.fields.update(d.fields)
            new_doclist.append(newd)
        self.set_doclist(new_doclist)
    def has_read_perm(self):
        return webnotes.has_permission(self.doc.doctype, "read", self.doc)
    def save(self, check_links=1):
        """Validate and persist the doclist as Draft (docstatus 0)."""
        perm_to_check = "write"
        if self.doc.fields.get("__islocal"):
            perm_to_check = "create"
            if not self.doc.owner:
                self.doc.owner = webnotes.session.user
        if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, perm_to_check, self.doc):
            self.to_docstatus = 0
            self.prepare_for_save("save")
            if not self.ignore_validate:
                self.run_method('validate')
            if not self.ignore_mandatory:
                self.check_mandatory()
            self.save_main()
            self.save_children()
            self.run_method('on_update')
            if perm_to_check=="create":
                self.run_method("after_insert")
        else:
            self.no_permission_to(_(perm_to_check.title()))
        return self
    def submit(self):
        """Persist the doclist and move it to Submitted (docstatus 1)."""
        if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "submit", self.doc):
            self.to_docstatus = 1
            self.prepare_for_save("submit")
            self.run_method('validate')
            self.check_mandatory()
            self.save_main()
            self.save_children()
            self.run_method('on_update')
            self.run_method('on_submit')
        else:
            self.no_permission_to(_("Submit"))
        return self
    def cancel(self):
        """Move a Submitted doclist to Cancelled (docstatus 2)."""
        if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "cancel", self.doc):
            self.to_docstatus = 2
            self.prepare_for_save("cancel")
            self.run_method('before_cancel')
            self.save_main()
            self.save_children()
            self.run_method('on_cancel')
            self.check_no_back_links_exist()
        else:
            self.no_permission_to(_("Cancel"))
        return self
    def update_after_submit(self):
        """Save changes on an already-Submitted document (docstatus stays 1)."""
        if self.doc.docstatus != 1:
            webnotes.msgprint("Only to called after submit", raise_exception=1)
        if self.ignore_permissions or webnotes.has_permission(self.doc.doctype, "write", self.doc):
            self.to_docstatus = 1
            self.prepare_for_save("update_after_submit")
            self.run_method('validate')
            self.run_method('before_update_after_submit')
            self.save_main()
            self.save_children()
            self.run_method('on_update_after_submit')
        else:
            self.no_permission_to(_("Update"))
        return self
    def save_main(self):
        """Write the parent document; surface duplicate-name errors nicely."""
        try:
            self.doc.save(check_links = False, ignore_fields = self.ignore_fields)
        except NameError, e:
            webnotes.msgprint('%s "%s" already exists' % (self.doc.doctype, self.doc.name))
            # prompt if cancelled
            if webnotes.conn.get_value(self.doc.doctype, self.doc.name, 'docstatus')==2:
                webnotes.msgprint('[%s "%s" has been cancelled]' % (self.doc.doctype, self.doc.name))
            webnotes.errprint(webnotes.utils.getTraceback())
            raise
    def save_children(self):
        """Write child rows and delete DB rows no longer in the doclist."""
        child_map = {}
        for d in self.doclist[1:]:
            if d.fields.get("parent") or d.fields.get("parentfield"):
                d.parent = self.doc.name # rename if reqd
                d.parenttype = self.doc.doctype
                d.save(check_links=False, ignore_fields = self.ignore_fields)
            child_map.setdefault(d.doctype, []).append(d.name)
        # delete all children in database that are not in the child_map
        # get all children types
        tablefields = webnotes.model.meta.get_table_fields(self.doc.doctype)
        for dt in tablefields:
            if dt[0] not in self.ignore_children_type:
                cnames = child_map.get(dt[0]) or []
                if cnames:
                    webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s and
                        name not in (%s)""" % (dt[0], '%s', '%s', ','.join(['%s'] * len(cnames))),
                            tuple([self.doc.name, self.doc.doctype] + cnames))
                else:
                    webnotes.conn.sql("""delete from `tab%s` where parent=%s and parenttype=%s""" \
                        % (dt[0], '%s', '%s'), (self.doc.name, self.doc.doctype))
    def delete(self):
        webnotes.delete_doc(self.doc.doctype, self.doc.name)
    def no_permission_to(self, ptype):
        # Uniform permission-denied error, raised as BeanPermissionError.
        webnotes.msgprint(("%s (%s): " % (self.doc.name, _(self.doc.doctype))) + \
            _("No Permission to ") + ptype, raise_exception=BeanPermissionError)
    def check_no_back_links_exist(self):
        # A document may only be cancelled if nothing still links to it.
        from webnotes.model.utils import check_if_doc_is_linked
        check_if_doc_is_linked(self.doc.doctype, self.doc.name, method="Cancel")
    def check_mandatory(self):
        """Collect all missing required fields, report them, then raise."""
        missing = []
        for doc in self.doclist:
            for df in self.meta:
                if df.doctype=="DocField" and df.reqd and df.parent==doc.doctype and df.fieldname!="naming_series":
                    msg = ""
                    if df.fieldtype == "Table":
                        if not self.doclist.get({"parentfield": df.fieldname}):
                            msg = _("Error") + ": " + _("Data missing in table") + ": " + _(df.label)
                    elif doc.fields.get(df.fieldname) is None:
                        msg = _("Error") + ": "
                        if doc.parentfield:
                            msg += _("Row") + (" # %s: " % (doc.idx,))
                        msg += _("Value missing for") + ": " + _(df.label)
                    if msg:
                        missing.append([msg, df.fieldname])
        if missing:
            for msg, fieldname in missing:
                msgprint(msg)
            raise webnotes.MandatoryError, ", ".join([fieldname for msg, fieldname in missing])
    def convert_type(self, doc):
        # Cast Single-doctype field values to their declared numeric types
        # (Singles are stored as strings in the database).
        if doc.doctype==doc.name and doc.doctype!="DocType":
            for df in self.meta.get({"doctype": "DocField", "parent": doc.doctype}):
                if df.fieldtype in ("Int", "Check"):
                    doc.fields[df.fieldname] = cint(doc.fields.get(df.fieldname))
                elif df.fieldtype in ("Float", "Currency"):
                    doc.fields[df.fieldname] = flt(doc.fields.get(df.fieldname))
            doc.docstatus = cint(doc.docstatus)
    def extract_images_from_text_editor(self):
        # Move inline base64 images in Text Editor fields out into files.
        from webnotes.utils.file_manager import extract_images_from_html
        if self.doc.doctype != "DocType":
            for df in self.meta.get({"doctype": "DocField", "parent": self.doc.doctype, "fieldtype":"Text Editor"}):
                extract_images_from_html(self.doc, df.fieldname)
def clone(source_wrapper):
    """Return a new, unsaved Bean copied from ``source_wrapper``.

    Accepts either a Bean or a raw doclist. Amendment links are cleared and
    every copied document is reset to an unnamed local draft.
    """
    if isinstance(source_wrapper, list):
        source_wrapper = Bean(source_wrapper)
    duplicate = Bean(source_wrapper.doclist.copy())
    # Drop amendment linkage so the copy does not claim to amend anything.
    for linked_field in ("amended_from", "amendment_date"):
        if duplicate.doc.fields.get(linked_field):
            duplicate.doc.fields[linked_field] = None
    # Reset identity on every row: unnamed, local (unsaved), draft status.
    for d in duplicate.doclist:
        d.fields.update({
            "name": None,
            "__islocal": 1,
            "docstatus": 0,
        })
    return duplicate
def notify(bean, method):
    """Fire the optional app-level hook after a controller method runs."""
    if on_method is not None:
        on_method(bean, method)
# for bc
def getlist(doclist, parentfield):
    """Backwards-compatibility shim for webnotes.model.utils.getlist."""
    from webnotes.model import utils
    return utils.getlist(doclist, parentfield)
def copy_doclist(doclist, no_copy=None):
    """
    Make a copy of the doclist.

    :param doclist: list of documents to copy.
    :param no_copy: optional list of fieldnames that must not be copied.
    """
    import webnotes.model.utils
    # Use None as the sentinel instead of a mutable default ([]): a shared
    # default list would leak state between calls if it were ever mutated.
    if no_copy is None:
        no_copy = []
    return webnotes.model.utils.copy_doclist(doclist, no_copy)
| mit |
wimnat/ansible-modules-extras | notification/osx_say.py | 161 | 2108 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Michael DeHaan <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: osx_say
version_added: "1.2"
short_description: Makes an OSX computer to speak.
description:
- makes an OS computer speak! Amuse your friends, annoy your coworkers!
notes:
- If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout.
options:
msg:
description:
What to say
required: true
voice:
description:
What voice to use
required: false
requirements: [ say ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
'''
DEFAULT_VOICE='Trinoids'
def say(module, msg, voice):
    """Speak ``msg`` through /usr/bin/say; fail the task on a non-zero exit."""
    cmd = ["/usr/bin/say", msg, "--voice=%s" % (voice)]
    module.run_command(cmd, check_rc=True)
def main():
    """Module entry point: parse arguments, verify `say` exists, then speak."""
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            voice=dict(required=False, default=DEFAULT_VOICE),
        ),
        supports_check_mode=False
    )
    # `say` ships with OS X only; fail fast on any other platform.
    if not os.path.exists("/usr/bin/say"):
        module.fail_json(msg="/usr/bin/say is not installed")
    msg = module.params['msg']
    say(module, msg, module.params['voice'])
    # Speaking never changes managed state, so report changed=False.
    module.exit_json(msg=msg, changed=False)
# import module snippets
# NOTE: Ansible modules conventionally star-import the shared boilerplate
# (AnsibleModule, etc.) at the bottom of the file and then invoke main().
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
mikecroucher/GPy | GPy/models/gp_grid_regression.py | 6 | 1195 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# Kurt Cutajar
from ..core import GpGrid
from .. import likelihoods
from .. import kern
class GPRegressionGrid(GpGrid):
    """
    Gaussian Process regression for grid-structured inputs, exploiting
    Kronecker-product structure via the GpGrid core model.

    This is a thin convenience wrapper around :class:`GpGrid` that fills in
    sensible defaults (RBF kernel, Gaussian likelihood).

    :param X: input observations
    :param Y: observed values
    :param kernel: a GPy kernel; defaults to the kron variation of SqExp
    :param Norm normalizer: [False]
        Normalize Y with the norm given.
        If normalizer is False, no normalization will be done
        If it is None, we use GaussianNorm(alization)

    .. Note:: Multiple independent outputs are allowed using columns of Y
    """
    def __init__(self, X, Y, kernel=None, Y_metadata=None, normalizer=None):
        # Default to an RBF kernel — currently the only kernel implemented
        # for the grid (Kronecker) machinery.
        if kernel is None:
            kernel = kern.RBF(1)
        gaussian_likelihood = likelihoods.Gaussian()
        super(GPRegressionGrid, self).__init__(X, Y, kernel, gaussian_likelihood, name='GP Grid regression', Y_metadata=Y_metadata, normalizer=normalizer)
| bsd-3-clause |
Epirex/android_external_chromium_org | build/android/pylib/utils/test_options_parser.py | 54 | 3471 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses options for the instrumentation tests."""
import os
# TODO(gkanwar): Some downstream scripts current rely on these functions
# existing. This dependency should be removed, and this file deleted, in the
# future.
def AddBuildTypeOption(option_parser):
    """Decorates OptionParser with the --debug/--release build type option.

    The default comes from the BUILDTYPE environment variable, falling back
    to 'Debug' when it is unset.
    """
    default_build_type = os.environ.get('BUILDTYPE', 'Debug')
    option_parser.add_option('--debug', action='store_const', const='Debug',
                             dest='build_type', default=default_build_type,
                             help='If set, run test suites under out/Debug. '
                                  'Default is env var BUILDTYPE or Debug')
    option_parser.add_option('--release', action='store_const', const='Release',
                             dest='build_type',
                             help='If set, run test suites under out/Release. '
                                  'Default is env var BUILDTYPE or Debug.')
def AddTestRunnerOptions(option_parser, default_timeout=60):
    """Decorates OptionParser with options applicable to all tests.

    Adds timeout, cleanup, retry, verbosity, profiling, tool, dashboard and
    deps-push options, then delegates to AddBuildTypeOption for
    --debug/--release.
    """
    option_parser.add_option('-t', dest='timeout',
                             help='Timeout to wait for each test',
                             type='int',
                             default=default_timeout)
    option_parser.add_option('-c', dest='cleanup_test_files',
                             help='Cleanup test files on the device after run',
                             action='store_true')
    option_parser.add_option('--num_retries', dest='num_retries', type='int',
                             default=2,
                             help='Number of retries for a test before '
                             'giving up.')
    # -v may be repeated; verbose_count accumulates via action='count'.
    option_parser.add_option('-v',
                             '--verbose',
                             dest='verbose_count',
                             default=0,
                             action='count',
                             help='Verbose level (multiple times for more)')
    profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
                 'traceview']
    # --profiler may also be repeated (action='append') to run several at once.
    option_parser.add_option('--profiler', dest='profilers', action='append',
                             choices=profilers,
                             help='Profiling tool to run during test. '
                             'Pass multiple times to run multiple profilers. '
                             'Available profilers: %s' % profilers)
    option_parser.add_option('--tool',
                             dest='tool',
                             help='Run the test under a tool '
                             '(use --tool help to list them)')
    option_parser.add_option('--flakiness-dashboard-server',
                             dest='flakiness_dashboard_server',
                             help=('Address of the server that is hosting the '
                                   'Chrome for Android flakiness dashboard.'))
    option_parser.add_option('--skip-deps-push', dest='push_deps',
                             action='store_false', default=True,
                             help='Do not push dependencies to the device. '
                             'Use this at own risk for speeding up test '
                             'execution on local machine.')
    AddBuildTypeOption(option_parser)
| bsd-3-clause |
valdecdev/odoo | addons/event/tests/test_mail_schedule.py | 2 | 3946 | # -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from openerp import fields, tools
from openerp.addons.event.tests.common import TestEventCommon
from openerp.tools import mute_logger
class TestMailSchedule(TestEventCommon):
    """Tests the event mail schedulers (event.mail) end to end."""

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_event_mail_schedule(self):
        """ Test mail scheduling for events """
        # Auto-confirm registrations so the 'after_sub' mail fires as soon as
        # a registration is created.
        self.env['ir.values'].set_default('marketing.config.settings', 'auto_confirmation', True)

        now = fields.datetime.now()
        event_date_begin = now + relativedelta(days=1)
        event_date_end = now + relativedelta(days=3)

        # Event with two mail schedulers: one right at subscription time and
        # one two days before the event begins.
        test_event = self.Event.sudo(self.user_eventmanager).create({
            'name': 'TestEventMail',
            'date_begin': event_date_begin,
            'date_end': event_date_end,
            'seats_max': 10,
            'event_mail_ids': [
                (0, 0, {  # right at subscription
                    'interval_unit': 'now',
                    'interval_type': 'after_sub',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_subscription')}),
                (0, 0, {  # 2 days before event
                    'interval_nbr': 2,
                    'interval_unit': 'days',
                    'interval_type': 'before_event',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_reminder')}),
            ]
        })

        # create some registrations
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg0',
            'email': 'reg0@example.com',
        })
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg1',
            'email': 'reg1@example.com',
        })

        # check subscription scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'after_sub')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, test_event.create_date, 'event: incorrect scheduled date for checking controller')
        # verify that subscription scheduler was auto-executed after each registration
        self.assertEqual(len(schedulers[0].mail_registration_ids), 2, 'event: incorrect number of mail scheduled date')
        # One outgoing mail per registration is expected in the queue.
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'subscription'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of subscription mail sent')
        for registration in schedulers[0].mail_registration_ids:
            self.assertTrue(registration.mail_sent, 'event: wrongly confirmed mailing on subscription')

        # check before event scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'before_event')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        # Scheduled date must be event start minus the 2-day interval.
        self.assertEqual(schedulers[0].scheduled_date, datetime.datetime.strftime(event_date_begin + relativedelta(days=-2), tools.DEFAULT_SERVER_DATETIME_FORMAT), 'event: incorrect scheduled date')

        # execute event reminder scheduler explicitly
        schedulers[0].execute()
        self.assertTrue(schedulers[0].mail_sent, 'event: reminder scheduler should have sent an email')
        self.assertTrue(schedulers[0].done, 'event: reminder scheduler should be done')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'reminder'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of reminders in outgoing mail queue')
| agpl-3.0 |
bellowsj/aiopogo | aiopogo/pogoprotos/settings/master/equipped_badge_settings_pb2.py | 1 | 3205 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/settings/master/equipped_badge_settings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: everything below is emitted by protoc from
# pogoprotos/settings/master/equipped_badge_settings.proto; manual edits
# would be overwritten on regeneration.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/settings/master/equipped_badge_settings.proto',
  package='pogoprotos.settings.master',
  syntax='proto3',
  serialized_pb=_b('\n8pogoprotos/settings/master/equipped_badge_settings.proto\x12\x1apogoprotos.settings.master\"y\n\x15\x45quippedBadgeSettings\x12\x1f\n\x17\x65quip_badge_cooldown_ms\x18\x01 \x01(\x03\x12\x1f\n\x17\x63\x61tch_probability_bonus\x18\x02 \x03(\x02\x12\x1e\n\x16\x66lee_probability_bonus\x18\x03 \x03(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


# Descriptor for the EquippedBadgeSettings message: one int64 scalar plus
# two repeated float fields.
_EQUIPPEDBADGESETTINGS = _descriptor.Descriptor(
  name='EquippedBadgeSettings',
  full_name='pogoprotos.settings.master.EquippedBadgeSettings',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='equip_badge_cooldown_ms', full_name='pogoprotos.settings.master.EquippedBadgeSettings.equip_badge_cooldown_ms', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='catch_probability_bonus', full_name='pogoprotos.settings.master.EquippedBadgeSettings.catch_probability_bonus', index=1,
      number=2, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='flee_probability_bonus', full_name='pogoprotos.settings.master.EquippedBadgeSettings.flee_probability_bonus', index=2,
      number=3, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=88,
  serialized_end=209,
)

DESCRIPTOR.message_types_by_name['EquippedBadgeSettings'] = _EQUIPPEDBADGESETTINGS

# Build the concrete message class from the descriptor via the reflection
# metaclass, then register it with the default symbol database.
EquippedBadgeSettings = _reflection.GeneratedProtocolMessageType('EquippedBadgeSettings', (_message.Message,), dict(
  DESCRIPTOR = _EQUIPPEDBADGESETTINGS,
  __module__ = 'pogoprotos.settings.master.equipped_badge_settings_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.settings.master.EquippedBadgeSettings)
  ))
_sym_db.RegisterMessage(EquippedBadgeSettings)


# @@protoc_insertion_point(module_scope)
| mit |
stanlyxiang/incubator-hawq | tools/bin/pythonSrc/pexpect-4.2/tests/test_socket.py | 7 | 10537 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
from pexpect import fdpexpect
import unittest
from . import PexpectTestCase
import multiprocessing
import os
import signal
import socket
import time
import errno
class SocketServerError(Exception):
    """Raised when the helper socket server fails to start in time."""
class ExpectTestCase(PexpectTestCase.PexpectTestCase):
    """Exercises fdpexpect.fdspawn against a real TCP socket server.

    Each test starts a throw-away server in a child process (see
    socket_server) and drives a short prompt/response dialogue over a
    connected socket.
    """

    def setUp(self):
        # Print the test id so a hang can be attributed to a specific test.
        print(self.id())
        PexpectTestCase.PexpectTestCase.setUp(self)
        self.host = '127.0.0.1'
        # Fixed port in the dynamic range (49152 + 10000 == 59152).
        self.port = 49152 + 10000
        # Banner deliberately longer than one 1024-byte page to exercise
        # buffer-size handling; '\n' -> '\n\r' mimics a terminal server.
        self.motd = b"""\
------------------------------------------------------------------------------
*                   Welcome to the SOCKET UNIT TEST code!                    *
------------------------------------------------------------------------------
*                                                                            *
* This unit test code is our best effort at testing the ability of the      *
* pexpect library to handle sockets. We need some text to test buffer size  *
* handling.                                                                  *
*                                                                            *
* A page is 1024 bytes or 1K. 80 x 24 = 1920. So a standard terminal window *
* contains more than one page. We actually want more than a page for our    *
* tests.                                                                     *
*                                                                            *
* This is the twelfth line, and we need 24. So we need a few more paragraphs.*
* We can keep them short and just put lines between them.                   *
*                                                                            *
* The 80 x 24 terminal size comes from the ancient past when computers were *
* only able to display text in cuneiform writing.                           *
*                                                                            *
* The cunieform writing system used the edge of a reed to make marks on clay*
* tablets.                                                                   *
*                                                                            *
* It was the forerunner of the style of handwriting used by doctors to write*
* prescriptions. Thus the name: pre (before) script (writing) ion (charged  *
* particle).                                                                 *
------------------------------------------------------------------------------
""".replace(b'\n', b'\n\r') + b"\r\n"
        self.prompt1 = b'Press Return to continue:'
        self.prompt2 = b'Rate this unit test>'
        self.prompt3 = b'Press X to exit:'
        self.enter = b'\r\n'
        self.exit = b'X\r\n'
        # Launch the server child and wait up to ~10s for it to signal
        # readiness via the shared Event.
        self.server_up = multiprocessing.Event()
        self.server_process = multiprocessing.Process(target=self.socket_server, args=(self.server_up,))
        self.server_process.daemon = True
        self.server_process.start()
        counter = 0
        while not self.server_up.is_set():
            time.sleep(0.250)
            counter += 1
            if counter > (10 / 0.250):
                raise SocketServerError("Could not start socket server")

    def tearDown(self):
        # SIGINT lands in socket_server's KeyboardInterrupt handler so the
        # child can close its listening socket before exiting.
        os.kill(self.server_process.pid, signal.SIGINT)
        self.server_process.join(timeout=5.0)
        PexpectTestCase.PexpectTestCase.tearDown(self)

    def socket_server(self, server_up):
        """Child-process body: serve the motd/prompt dialogue until killed.

        Sets *server_up* once the socket is listening; runs until SIGINT.
        """
        sock = None
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((self.host, self.port))
            sock.listen(5)
            server_up.set()
            while True:
                (conn, addr) = sock.accept()
                conn.send(self.motd)
                conn.send(self.prompt1)
                result = conn.recv(1024)
                if result != self.enter:
                    break
                conn.send(self.prompt2)
                result = conn.recv(1024)
                if result != self.enter:
                    break
                conn.send(self.prompt3)
                result = conn.recv(1024)
                # NOTE(review): under Python 3, self.exit[0] is an int and
                # bytes.startswith(int) raises TypeError -- confirm intended
                # behavior on py3.
                if result.startswith(self.exit[0]):
                    conn.shutdown(socket.SHUT_RDWR)
                    conn.close()
        except KeyboardInterrupt:
            pass
        if sock is not None:
            try:
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except socket.error:
                pass
        exit(0)

    def socket_fn(self, timed_out, all_read):
        """Child-process body used by the interrupt tests.

        Reads the server banner, sets *all_read*, then blocks in a second
        read that is expected to hit pexpect.TIMEOUT (sets *timed_out* and
        exits with errno.ETIMEDOUT).
        """
        result = 0
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.host, self.port))
            session = fdpexpect.fdspawn(sock, timeout=10)
            # Get all data from server
            session.read_nonblocking(size=4096)
            all_read.set()
            # This read should timeout
            session.read_nonblocking(size=4096)
        except pexpect.TIMEOUT:
            timed_out.set()
            result = errno.ETIMEDOUT
        exit(result)

    def test_socket(self):
        """Full dialogue via send() on an fdspawn wrapping a raw fd."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
        session.expect(self.prompt1)
        self.assertEqual(session.before, self.motd)
        session.send(self.enter)
        session.expect(self.prompt2)
        session.send(self.enter)
        session.expect(self.prompt3)
        session.send(self.exit)
        session.expect(pexpect.EOF)
        self.assertEqual(session.before, b'')

    def test_socket_with_write(self):
        """Same dialogue as test_socket but using write() instead of send()."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
        session.expect(self.prompt1)
        self.assertEqual(session.before, self.motd)
        session.write(self.enter)
        session.expect(self.prompt2)
        session.write(self.enter)
        session.expect(self.prompt3)
        session.write(self.exit)
        session.expect(pexpect.EOF)
        self.assertEqual(session.before, b'')

    def test_not_int(self):
        # fdspawn requires an int fd (or object with fileno()); a string
        # must be rejected.
        with self.assertRaises(pexpect.ExceptionPexpect):
            session = fdpexpect.fdspawn('bogus', timeout=10)

    def test_not_file_descriptor(self):
        # A negative fd is not a valid file descriptor.
        with self.assertRaises(pexpect.ExceptionPexpect):
            session = fdpexpect.fdspawn(-1, timeout=10)

    def test_timeout(self):
        # Expecting a pattern the server never sends must raise TIMEOUT.
        with self.assertRaises(pexpect.TIMEOUT):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.host, self.port))
            session = fdpexpect.fdspawn(sock, timeout=10)
            session.expect(b'Bogus response')

    def test_interrupt(self):
        """A single SIGWINCH must not break a blocking read (EINTR retry)."""
        timed_out = multiprocessing.Event()
        all_read = multiprocessing.Event()
        test_proc = multiprocessing.Process(target=self.socket_fn, args=(timed_out, all_read))
        test_proc.daemon = True
        test_proc.start()
        while not all_read.is_set():
            time.sleep(1.0)
        os.kill(test_proc.pid, signal.SIGWINCH)
        while not timed_out.is_set():
            time.sleep(1.0)
        test_proc.join(timeout=5.0)
        self.assertEqual(test_proc.exitcode, errno.ETIMEDOUT)

    def test_multiple_interrupts(self):
        """Repeated SIGWINCH signals must also be survived until TIMEOUT."""
        timed_out = multiprocessing.Event()
        all_read = multiprocessing.Event()
        test_proc = multiprocessing.Process(target=self.socket_fn, args=(timed_out, all_read))
        test_proc.daemon = True
        test_proc.start()
        while not all_read.is_set():
            time.sleep(1.0)
        while not timed_out.is_set():
            os.kill(test_proc.pid, signal.SIGWINCH)
            time.sleep(1.0)
        test_proc.join(timeout=5.0)
        self.assertEqual(test_proc.exitcode, errno.ETIMEDOUT)

    def test_maxread(self):
        # Run the dialogue with a maxread smaller than the banner to force
        # multiple reads.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
        session.maxread = 1100
        session.expect(self.prompt1)
        self.assertEqual(session.before, self.motd)
        session.send(self.enter)
        session.expect(self.prompt2)
        session.send(self.enter)
        session.expect(self.prompt3)
        session.send(self.exit)
        session.expect(pexpect.EOF)
        self.assertEqual(session.before, b'')

    def test_fd_isalive (self):
        # isalive() tracks the underlying fd's validity.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
        assert session.isalive()
        sock.close()
        assert not session.isalive(), "Should not be alive after close()"

    def test_fd_isatty (self):
        # A socket fd is never a tty.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock.fileno(), timeout=10)
        assert not session.isatty()
        session.close()

    def test_fileobj(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        session = fdpexpect.fdspawn(sock, timeout=10) # Should get the fileno from the socket
        session.expect(self.prompt1)
        session.close()
        assert not session.isalive()
        session.close()  # Smoketest - should be able to call this again
# Run the suite when executed directly; `suite` stays at module level for
# runners that collect it explicitly.
if __name__ == '__main__':
    unittest.main()

suite = unittest.makeSuite(ExpectTestCase, 'test')
| apache-2.0 |
rogerwang/chromium | third_party/closure_linter/closure_linter/ecmalintrules.py | 123 | 34907 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokens
from closure_linter import javascripttokenizer
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import htmlutil
from closure_linter.common import lintrunner
from closure_linter.common import position
from closure_linter.common import tokens
import gflags as flags
FLAGS = flags.FLAGS
# Lets users extend the set of JsDoc tags accepted by the linter.
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')

# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs

# Shorthand aliases for classes and enums referenced throughout this module.
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
# Static constants.
MAX_LINE_LENGTH = 80
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile('(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile('\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
  def __init__(self):
    """Initialize this lint rule object."""
    # Delegates to the base class; per-file state is (re)built in
    # Initialize(), not here.
    checkerbase.LintRulesBase.__init__(self)
  def Initialize(self, checker, limited_doc_checks, is_html):
    """Initialize this lint rule object before parsing a new file.

    Args:
      checker: Style checker the rules report errors through.
      limited_doc_checks: Whether doc checking should be relaxed for this
          file -- presumably a per-file flag; confirm against checkerbase.
      is_html: Whether the file being checked is an HTML file.
    """
    checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
                                         is_html)
    # Indentation tracking is per-file, so rebuild it on each Initialize.
    self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
"""
# Start from the last token so that we have the flag object attached to
# and DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.NORMAL):
# Dots are acceptable places to wrap.
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except:
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > self.MAX_LINE_LENGTH:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignoreable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max = 1
if '@param' in parts:
max = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max):
self._HandleError(errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
"""
flag = token.attached_object
type = flag.type
if type and type is not None and not type.isspace():
pieces = self.TYPE_SPLIT.split(type)
if len(pieces) == 1 and type.count('|') == 1 and (
type.endswith('|null') or type.startswith('null|')):
self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % type, token)
for p in pieces:
if p.count('|') and p.count('?'):
# TODO(robbyw): We should do actual parsing of JsDoc types. As is,
# this won't report an error for {number|Array.<string>?}, etc.
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space or
the previous token doesn't end with a space and the previous token is on the
same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
Position.AtBeginning())
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT)):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token)
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, Position(0, space_count))
elif (type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif type == Type.END_BLOCK:
# This check is for object literal end block tokens, but there is no need
# to test that condition since a comma at the end of any other kind of
# block is undoubtedly a parse error.
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of object literal', last_code,
Position.All(last_code.string))
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
if state.InTopLevelFunction():
# When the function was top-level and not immediately called, check
# that it's terminated by a semi-colon.
if state.InAssignedFunction():
if not is_immediately_called and (last_in_line or
not token.next.type == Type.SEMICOLON):
self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, Position.AtEnd(token.string))
else:
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, Position.All(token.next.string))
if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
self._HandleError(errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, Position.All(token.next.string))
elif type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
# Allow a single double semi colon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, Position.All(token.string))
elif type == Type.START_PAREN:
if token.previous and token.previous.type == Type.KEYWORD:
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
if (before_space and before_space.line_number == token.line_number and
before_space.type == Type.IDENTIFIER):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, Position.All(token.previous.string))
elif type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous, Position.All(token.previous.string))
if token.type == Type.END_BRACKET:
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of array literal', last_code,
Position.All(last_code.string))
elif type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
Position(1, len(token.string) - 1))
elif type == Type.OPERATOR:
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
self._HandleError(errors.EXTRA_SPACE,
'Extra space before "%s"' % token.string, token.previous,
Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
Position.AtBeginning())
# Check that binary operators are not used to start lines.
if ((not last_code or last_code.line_number != token.line_number) and
not token.metadata.IsUnaryOperator()):
self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator should go on previous line "%s"' % token.string,
token)
elif type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.type.split('|'):
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type,
token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space after email address',
token.next,
Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
# We want punctuation to be inside of any tags ending a description,
# so strip tags before checking description. See bug 1127192. Note
# that depending on how lines break, the real description end token
# may consist only of stripped html and the effective end token can
# be different.
end_token = flag.description_end_token
end_string = htmlutil.StripTags(end_token.string).strip()
while (end_string == '' and not
end_token.type in Type.FLAG_ENDING_TYPES):
end_token = end_token.previous
if end_token.type in Type.FLAG_DESCRIPTION_TYPES:
end_string = htmlutil.StripTags(end_token.string).rstrip()
if not (end_string.endswith('.') or end_string.endswith('?') or
end_string.endswith('!')):
# Find the position for the missing punctuation, inside of any html
# tags.
desc_str = end_token.string.rstrip()
while desc_str.endswith('>'):
start_tag_index = desc_str.rfind('<')
if start_tag_index < 0:
break
desc_str = desc_str[:start_tag_index].rstrip()
end_position = Position(len(desc_str), 0)
self._HandleError(
errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
('%s descriptions must end with valid punctuation such as a '
'period.' % token.string),
end_token, end_position)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.type and flag.type != '' and not flag.type.isspace():
self._CheckJsDocType(token)
if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
not state.InParentheses() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token);
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
not ('underscore' in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# Check for illegaly assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
# Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
  def _HandleStartBracket(self, token, last_non_space_token):
    """Handles a token that is an open bracket.

    Flags EXTRA_SPACE when "[" follows whitespace that itself follows an
    expression-ending token, and MISSING_SPACE when "[" directly follows a
    token that is neither whitespace, an opening paren/bracket, nor an
    expression ender.

    Args:
      token: The token to handle.
      last_non_space_token: The last token that was not a space.
    """
    if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
        last_non_space_token and
        last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
      self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
                        token.previous, Position.All(token.previous.string))
    # If the [ token is the first token in a line we shouldn't complain
    # about a missing space before [. This is because some Ecma script
    # languages allow syntax like:
    # [Annotation]
    # class MyClass {...}
    # So we don't want to blindly warn about missing spaces before [.
    # In the future, when rules for computing exactly how many spaces
    # lines should be indented are added, then we can return errors for
    # [ tokens that are improperly indented.
    # For example:
    # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
    #     [a,b,c];
    # should trigger a proper indentation warning message as [ is not indented
    # by four spaces.
    elif (not token.IsFirstInLine() and token.previous and
          not token.previous.type in (
              [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
              Type.EXPRESSION_ENDER_TYPES)):
      self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
                        token, Position.AtBeginning())
def Finalize(self, state, tokenizer_mode):
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
# Check that the mode is not mid comment, argument list, etc.
if not tokenizer_mode == Modes.TEXT_MODE:
self._HandleError(
errors.FILE_IN_BLOCK,
'File ended in mode "%s".' % tokenizer_mode,
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
| bsd-3-clause |
Ms2ger/servo | tests/wpt/web-platform-tests/tools/six/six.py | 426 | 27961 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.8.0"


# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Aliases for the types whose names differ between Python 2 and Python 3.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe: a __len__ of 2**31 overflows a 32-bit Py_ssize_t in len().
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    # Non-data descriptor that computes its value on first access via the
    # subclass-provided _resolve(), caches it on the instance, and then
    # removes itself from the owner class so it never runs again.

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    # Lazy placeholder for a module that was renamed between Python 2 and 3.

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                # The Python 3 module name defaults to the attribute name.
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache on self so __getattr__ is not triggered again for this name.
        setattr(self, attr, value)
        return value
# Base for pseudo-modules whose attributes are _LazyDescr placeholders.
class _LazyModule(types.ModuleType):

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the moved names in addition to the standard attributes.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    # Lazy placeholder for an attribute that moved between modules (and
    # possibly changed name) between Python 2 and 3.

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                # Default to the old attribute name if given, else to *name*.
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
# Single importer instance that serves every six.moves (sub)module.
_importer = _SixMetaPathImporter(__name__)


class _MovedItems(_LazyModule):

    """Lazy loading of moved objects"""
    __path__ = []  # mark as package
# Master table of renamed attributes and modules exposed via six.moves.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Install each entry as a lazy attribute of _MovedItems; moved modules are
# additionally registered with the meta path importer.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_parse"""


# Names merged into urllib.parse on Python 3 (from urlparse/urllib on 2).
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


# Exceptions gathered into urllib.error on Python 3.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


# Names gathered into urllib.request on Python 3 (from urllib/urllib2 on 2).
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


# Response wrapper classes gathered into urllib.response on Python 3.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# robotparser became urllib.robotparser on Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules come from the importer registry populated above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Register *move* so it becomes importable through six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Unregister *name* from six.moves; AttributeError if it is unknown."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not a registered class attribute; it may have been cached on the
        # moves module instance itself.
        if name in moves.__dict__:
            del moves.__dict__[name]
        else:
            raise AttributeError("no such move, %r" % (name,))
# Version-specific attribute names consumed by the operator.attrgetter
# helpers (get_method_function, get_function_code, ...) defined below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    advance_iterator = next
except NameError:
    # Fallback for interpreters without the next() builtin.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # Fallback for interpreters where the callable() builtin is missing:
    # emulate it by looking for __call__ anywhere in the MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; functions pass through unchanged.
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):
        # Base class mapping Python 2's next() onto a __next__ definition.

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


# Accessors for method/function internals under their version-specific names.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Uniform iterator/view access over dict-like objects for both versions.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and byte-sequence accessors.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# exec and re-raise use syntax that differs between the major versions, so
# the Python-2 variants are compiled from strings via exec_().
if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")


if sys.version_info > (3, 2):
    # "raise ... from ..." is Python-3-only syntax, hence the exec_ string.
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value
# Prefer the real print() builtin; fall back to an emulation where absent.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # If any of sep/end/args is unicode, promote all pieces to unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] >= (3, 4):
    # The stdlib implementation suffices here; on 3.4+ functools.wraps
    # already records the wrapped callable as __wrapped__.
    wraps = functools.wraps
else:
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        """functools.wraps variant that also sets ``__wrapped__``."""
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Trick: return a throwaway class whose metaclass, at the moment a
    # subclass is defined, replaces the whole thing with a class built by
    # the real *meta* from the real *bases*.
    class _TemporaryMeta(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

    return type.__new__(_TemporaryMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator that re-creates the decorated class under *metaclass*."""
    def rebuild(cls):
        # Copy the class namespace, dropping slot descriptors and the
        # implicit __dict__/__weakref__ descriptors, then rebuild the class
        # with the requested metaclass.
        namespace = cls.__dict__.copy()
        slots = namespace.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                namespace.pop(slot_name)
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return rebuild
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    # Avoid leaking the loop variables into the module namespace.
    del i, importer
# Finally, add the importer to the meta path import hook.
# NOTE: _importer is defined earlier in this module (outside this excerpt).
sys.meta_path.append(_importer)
| mpl-2.0 |
trondhindenes/ansible | lib/ansible/plugins/lookup/mongodb.py | 84 | 8872 | # (c) 2016, Marcos Diez <marcos@unitron.com.br>
# https://github.com/marcosdiez/
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.six import string_types, integer_types
__metaclass__ = type
DOCUMENTATION = '''
author: 'Marcos Diez <marcos (at) unitron.com.br>'
lookup: mongodb
version_added: "2.3"
short_description: lookup info from MongoDB
description:
- 'The ``MongoDB`` lookup runs the *find()* command on a given *collection* on a given *MongoDB* server.'
- 'The result is a list of jsons, so slightly different from what PyMongo returns. In particular, *timestamps* are converted to epoch integers.'
options:
connect_string:
description:
- Can be any valid MongoDB connection string, supporting authentication, replicasets, etc.
- "More info at U(https://docs.mongodb.org/manual/reference/connection-string/)"
default: "mongodb://localhost/"
database:
description:
- Name of the database which the query will be made
required: True
collection:
description:
- Name of the collection which the query will be made
required: True
filter:
description:
- Criteria of the output
type: 'dict'
default: '{}'
projection:
description:
- Fields you want returned
type: dict
default: "{}"
skip:
description:
- How many results should be skipped
type: integer
limit:
description:
- How many results should be shown
type: integer
sort:
description:
            - Sorting rules. Note that the sort-order constants are given as strings.
type: list
default: "[]"
notes:
- "Please check https://api.mongodb.org/python/current/api/pymongo/collection.html?highlight=find#pymongo.collection.Collection.find for more details."
requirements:
- pymongo >= 2.4 (python library)
'''
EXAMPLES = '''
- hosts: all
gather_facts: false
vars:
mongodb_parameters:
#mandatory parameters
database: 'local'
#optional
collection: "startup_log"
connection_string: "mongodb://localhost/"
      extra_connection_parameters: { "ssl" : True , "ssl_certfile": "/etc/self_signed_certificate.pem" }
#optional query parameters, we accept any parameter from the normal mongodb query.
filter: { "hostname": "batman" }
projection: { "pid": True , "_id" : False , "hostname" : True }
skip: 0
limit: 1
sort: [ [ "startTime" , "ASCENDING" ] , [ "age", "DESCENDING" ] ]
tasks:
- debug: msg="Mongo has already started with the following PID [{{ item.pid }}]"
with_mongodb: "{{mongodb_parameters}}"
'''
import datetime
try:
from pymongo import ASCENDING, DESCENDING
from pymongo.errors import ConnectionFailure
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Run a MongoDB find() for each term and return JSON-serializable rows."""
    def _fix_sort_parameter(self, sort_parameter):
        # Validate the user-supplied sort spec (a list of [field, order]
        # pairs) and translate string order names into PyMongo constants.
        if sort_parameter is None:
            return sort_parameter
        if not isinstance(sort_parameter, list):
            raise AnsibleError(u"Error. Sort parameters must be a list, not [ {0} ]".format(sort_parameter))
        for item in sort_parameter:
            self._convert_sort_string_to_constant(item)
        return sort_parameter
    def _convert_sort_string_to_constant(self, item):
        # item is a [field, order] pair; the order element is replaced
        # in place with pymongo.ASCENDING/DESCENDING when it matches.
        original_sort_order = item[1]
        sort_order = original_sort_order.upper()
        if sort_order == u"ASCENDING":
            item[1] = ASCENDING
        elif sort_order == u"DESCENDING":
            item[1] = DESCENDING
        # else the user knows what s/he is doing and we won't predict. PyMongo will return an error if necessary
    def convert_mongo_result_to_valid_json(self, result):
        # Recursively convert a PyMongo result document into plain Python
        # types; datetimes become epoch seconds (float), and anything else
        # unrecognized (e.g. ObjectId) is stringified as a failsafe.
        if result is None:
            return result
        if isinstance(result, integer_types + (float, bool)):
            return result
        if isinstance(result, string_types):
            return result
        elif isinstance(result, list):
            new_list = []
            for elem in result:
                new_list.append(self.convert_mongo_result_to_valid_json(elem))
            return new_list
        elif isinstance(result, dict):
            new_dict = {}
            for key in result.keys():
                value = result[key]  # python2 and 3 compatible....
                new_dict[key] = self.convert_mongo_result_to_valid_json(value)
            return new_dict
        elif isinstance(result, datetime.datetime):
            # epoch seconds since 1970-01-01 (naive UTC assumed — TODO confirm)
            return (result - datetime.datetime(1970, 1, 1)). total_seconds()
        else:
            # failsafe
            return u"{0}".format(result)
    def run(self, terms, variables, **kwargs):
        # Each term is a dict of connection + query parameters; the
        # non-connection keys are passed straight to pymongo's find().
        # NOTE(review): the term dict is mutated (keys deleted) — callers
        # reusing the same dict across runs would be affected; verify.
        ret = []
        for term in terms:
            u'''
            Makes a MongoDB query and returns the output as a valid list of json.
            Timestamps are converted to epoch integers/longs.
            Here is a sample playbook that uses it:
-------------------------------------------------------------------------------
- hosts: all
  gather_facts: false
  vars:
    mongodb_parameters:
      #optional parameter, default = "mongodb://localhost/"
      # connection_string: "mongodb://localhost/"
      #mandatory parameters
      database: 'local'
      collection: "startup_log"
      #optional query parameters
      #we accept any parameter from the normal mongodb query.
      # the official documentation is here
      # https://api.mongodb.org/python/current/api/pymongo/collection.html?highlight=find#pymongo.collection.Collection.find
      # filter: { "hostname": "batman" }
      # projection: { "pid": True , "_id" : False , "hostname" : True }
      # skip: 0
      # limit: 1
      # sort: [ [ "startTime" , "ASCENDING" ] , [ "age", "DESCENDING" ] ]
      # extra_connection_parameters = { }
      # dictionary with extra parameters like ssl, ssl_keyfile, maxPoolSize etc...
      # the full list is available here. It varies from PyMongo version
      # https://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient
    tasks:
      - debug: msg="Mongo has already started with the following PID [{{ item.pid }}] - full_data {{ item }} "
        with_items:
        - "{{ lookup('mongodb', mongodb_parameters) }}"
-------------------------------------------------------------------------------
            '''
            # NOTE(review): default here is "mongodb://localhost" while the
            # DOCUMENTATION string says "mongodb://localhost/" — confirm.
            connection_string = term.get(u'connection_string', u"mongodb://localhost")
            database = term[u"database"]
            collection = term[u'collection']
            extra_connection_parameters = term.get(u'extra_connection_parameters', {})
            # Strip connection-level keys so only find() kwargs remain.
            if u"extra_connection_parameters" in term:
                del term[u"extra_connection_parameters"]
            if u"connection_string" in term:
                del term[u"connection_string"]
            del term[u"database"]
            del term[u"collection"]
            if u"sort" in term:
                term[u"sort"] = self._fix_sort_parameter(term[u"sort"])
            # all other parameters are sent to mongo, so we are future and past proof
            try:
                client = MongoClient(connection_string, **extra_connection_parameters)
                results = client[database][collection].find(**term)
                for result in results:
                    result = self.convert_mongo_result_to_valid_json(result)
                    ret.append(result)
            except ConnectionFailure as e:
                raise AnsibleError(u'unable to connect to database: %s' % str(e))
        return ret
| gpl-3.0 |
vhaupert/mitmproxy | test/pathod/language/test_base.py | 8 | 9571 | import pytest
from pathod import language
from pathod.language import base, exceptions
def parse_request(s):
    """Parse a pathoc spec string and return its first request object."""
    # parse_pathoc yields requests lazily (see test_times, which wraps it
    # in list()). The original used the Python 2-only generator method
    # .next(); use the next() builtin so this works on Python 3.
    return next(language.parse_pathoc(s))
def test_times():
    # "x5" multiplier should expand to five requests; the parsed request
    # itself no longer carries the times attribute.
    reqs = list(language.parse_pathoc("get:/:x5"))
    assert len(reqs) == 5
    assert not reqs[0].times
def test_caseless_literal():
    class CL(base.CaselessLiteral):
        TOK = "foo"
    v = CL("foo")
    assert v.expr()
    assert v.values(language.Settings())
class TestTokValueNakedLiteral:
    # Naked literals: unquoted values whose spec() escapes control bytes.
    def test_expr(self):
        v = base.TokValueNakedLiteral("foo")
        assert v.expr()
    def test_spec(self):
        v = base.TokValueNakedLiteral("foo")
        assert v.spec() == repr(v) == "foo"
        v = base.TokValueNakedLiteral("f\x00oo")
        assert v.spec() == repr(v) == r"f\x00oo"
class TestTokValueLiteral:
    # Quoted literals: values are stored as bytes; spec() re-quotes them.
    def test_expr(self):
        v = base.TokValueLiteral("foo")
        assert v.expr()
        assert v.val == b"foo"
        v = base.TokValueLiteral("foo\n")
        assert v.expr()
        assert v.val == b"foo\n"
        assert repr(v)
    def test_spec(self):
        v = base.TokValueLiteral("foo")
        assert v.spec() == r"'foo'"
        v = base.TokValueLiteral("f\x00oo")
        assert v.spec() == repr(v) == r"'f\x00oo'"
        v = base.TokValueLiteral('"')
        assert v.spec() == repr(v) == """ '"' """.strip()
        # While pyparsing has a escChar argument for QuotedString,
        # escChar only performs scapes single-character escapes and does not work for e.g. r"\x02".
        # Thus, we cannot use that option, which means we cannot have single quotes in strings.
        # To fix this, we represent single quotes as r"\x07".
        v = base.TokValueLiteral("'")
        assert v.spec() == r"'\x27'"
    def roundtrip(self, spec):
        # Helper: spec() output must re-parse to an equivalent value.
        e = base.TokValueLiteral.expr()
        v = base.TokValueLiteral(spec)
        v2 = e.parseString(v.spec())
        assert v.val == v2[0].val
        assert v.spec() == v2[0].spec()
    def test_roundtrip(self):
        self.roundtrip("'")
        self.roundtrip(r"\'")
        self.roundtrip("a")
        self.roundtrip("\"")
        # self.roundtrip("\\")
        self.roundtrip("200:b'foo':i23,'\\''")
        self.roundtrip("\a")
class TestTokValueGenerate:
    # Generated values: "@<size><unit>,<datatype>" specs.
    def test_basic(self):
        v = base.TokValue.parseString("@10b")[0]
        assert v.usize == 10
        assert v.unit == "b"
        assert v.bytes() == 10
        v = base.TokValue.parseString("@10")[0]
        assert v.unit == "b"
        v = base.TokValue.parseString("@10k")[0]
        assert v.bytes() == 10240
        v = base.TokValue.parseString("@10g")[0]
        assert v.bytes() == 1024 ** 3 * 10
        v = base.TokValue.parseString("@10g,digits")[0]
        assert v.datatype == "digits"
        g = v.get_generator({})
        assert g[:100]
        v = base.TokValue.parseString("@10,digits")[0]
        assert v.unit == "b"
        assert v.datatype == "digits"
    def test_spec(self):
        v = base.TokValueGenerate(1, "b", "bytes")
        assert v.spec() == repr(v) == "@1"
        v = base.TokValueGenerate(1, "k", "bytes")
        assert v.spec() == repr(v) == "@1k"
        v = base.TokValueGenerate(1, "k", "ascii")
        assert v.spec() == repr(v) == "@1k,ascii"
        v = base.TokValueGenerate(1, "b", "ascii")
        assert v.spec() == repr(v) == "@1,ascii"
    def test_freeze(self):
        # freeze() materializes the generated bytes into a literal value.
        v = base.TokValueGenerate(100, "b", "ascii")
        f = v.freeze(language.Settings())
        assert len(f.val) == 100
class TestTokValueFile:
    # File-backed values ("<path") are sandboxed to Settings.staticdir.
    def test_file_value(self):
        v = base.TokValue.parseString("<'one two'")[0]
        assert str(v)
        assert v.path == "one two"
        v = base.TokValue.parseString("<path")[0]
        assert v.path == "path"
    def test_access_control(self, tmpdir):
        v = base.TokValue.parseString("<path")[0]
        f = tmpdir.join("path")
        f.write(b"x" * 10000)
        assert v.get_generator(language.Settings(staticdir=str(tmpdir)))
        v = base.TokValue.parseString("<path2")[0]
        with pytest.raises(exceptions.FileAccessDenied):
            v.get_generator(language.Settings(staticdir=str(tmpdir)))
        with pytest.raises(Exception, match="access disabled"):
            v.get_generator(language.Settings())
        v = base.TokValue.parseString("</outside")[0]
        with pytest.raises(Exception, match="outside"):
            v.get_generator(language.Settings(staticdir=str(tmpdir)))
    def test_spec(self):
        v = base.TokValue.parseString("<'one two'")[0]
        v2 = base.TokValue.parseString(v.spec())[0]
        assert v2.path == "one two"
    def test_freeze(self):
        v = base.TokValue.parseString("<'one two'")[0]
        v2 = v.freeze({})
        assert v2.path == v.path
class TestMisc:
    def test_generators(self):
        v = base.TokValue.parseString("'val'")[0]
        g = v.get_generator({})
        assert g[:] == b"val"
    def test_value(self):
        assert base.TokValue.parseString("'val'")[0].val == b"val"
        assert base.TokValue.parseString('"val"')[0].val == b"val"
        assert base.TokValue.parseString('"\'val\'"')[0].val == b"'val'"
    def test_value2(self):
        class TT(base.Value):
            preamble = "m"
        e = TT.expr()
        v = e.parseString("m'msg'")[0]
        assert v.value.val == b"msg"
        s = v.spec()
        assert s == e.parseString(s)[0].spec()
        v = e.parseString("m@100")[0]
        v2 = v.freeze({})
        v3 = v2.freeze({})
        assert v2.value.val == v3.value.val
    def test_fixedlengthvalue(self, tmpdir):
        class TT(base.FixedLengthValue):
            preamble = "m"
            length = 4
        e = TT.expr()
        assert e.parseString("m@4")
        with pytest.raises(Exception, match="Invalid value length"):
            e.parseString("m@100")
        with pytest.raises(Exception, match="Invalid value length"):
            e.parseString("m@1")
        s = base.Settings(staticdir=str(tmpdir))
        with open(str(tmpdir.join("path")), 'wb') as f:
            f.write(b"a" * 20)
        v = e.parseString("m<path")[0]
        with pytest.raises(Exception, match="Invalid value length"):
            v.values(s)
        with open(str(tmpdir.join("path2")), 'wb') as f:
            f.write(b"a" * 4)
        v = e.parseString("m<path2")[0]
        assert v.values(s)
# Minimal KeyValue subclass used by TestKeyValue below: renders as an
# HTTP-style "key: value\r\n" header line.
class TKeyValue(base.KeyValue):
    preamble = "h"
    def values(self, settings):
        return [
            self.key.get_generator(settings),
            ": ",
            self.value.get_generator(settings),
            "\r\n",
        ]
class TestKeyValue:
    def test_simple(self):
        e = TKeyValue.expr()
        v = e.parseString("h'foo'='bar'")[0]
        assert v.key.val == b"foo"
        assert v.value.val == b"bar"
        v2 = e.parseString(v.spec())[0]
        assert v2.key.val == v.key.val
        assert v2.value.val == v.value.val
        s = v.spec()
        assert s == e.parseString(s)[0].spec()
    def test_freeze(self):
        e = TKeyValue.expr()
        v = e.parseString("h@10=@10'")[0]
        v2 = v.freeze({})
        v3 = v2.freeze({})
        assert v2.key.val == v3.key.val
        assert v2.value.val == v3.value.val
def test_intfield():
    # IntField accepts either a named constant or a bounded integer.
    class TT(base.IntField):
        preamble = "t"
        names = {
            "one": 1,
            "two": 2,
            "three": 3
        }
        max = 4
    e = TT.expr()
    v = e.parseString("tone")[0]
    assert v.value == 1
    assert v.spec() == "tone"
    assert v.values(language.Settings())
    v = e.parseString("t1")[0]
    assert v.value == 1
    assert v.spec() == "t1"
    v = e.parseString("t4")[0]
    assert v.value == 4
    assert v.spec() == "t4"
    with pytest.raises(Exception, match="can't exceed"):
        e.parseString("t5")
def test_options_or_value():
    # Accepts either a bare option keyword or any quoted/generated value.
    class TT(base.OptionsOrValue):
        options = [
            "one",
            "two",
            "three"
        ]
    e = TT.expr()
    assert e.parseString("one")[0].value.val == b"one"
    assert e.parseString("'foo'")[0].value.val == b"foo"
    assert e.parseString("'get'")[0].value.val == b"get"
    assert e.parseString("one")[0].spec() == "one"
    assert e.parseString("'foo'")[0].spec() == "'foo'"
    s = e.parseString("one")[0].spec()
    assert s == e.parseString(s)[0].spec()
    s = e.parseString("'foo'")[0].spec()
    assert s == e.parseString(s)[0].spec()
    v = e.parseString("@100")[0]
    v2 = v.freeze({})
    v3 = v2.freeze({})
    assert v2.value.val == v3.value.val
def test_integer():
    e = base.Integer.expr()
    v = e.parseString("200")[0]
    assert v.string() == b"200"
    assert v.spec() == "200"
    assert v.freeze({}).value == v.value
    # Bounded subclass rejects out-of-range values at construction.
    class BInt(base.Integer):
        bounds = (1, 5)
    with pytest.raises(Exception, match="must be between"):
        BInt(0)
    with pytest.raises(Exception, match="must be between"):
        BInt(6)
    assert BInt(5)
    assert BInt(1)
    assert BInt(3)
# Boolean token used by the tests below; parses "test" / "-test".
class TBoolean(base.Boolean):
    name = "test"
def test_unique_name():
    b = TBoolean(True)
    assert b.unique_name
def test_boolean():
    """Parse and round-trip the TBoolean token ("test" / "-test").

    This was originally declared as ``class test_boolean:``, which made all
    of the assertions below execute at import time (class-body statements
    run when the class is created) and prevented pytest from collecting it
    as a test. Converting it to a function makes it a proper test while
    running the exact same statements.
    """
    e = TBoolean.expr()
    assert e.parseString("test")[0].value
    assert not e.parseString("-test")[0].value

    def roundtrip(s):
        # spec() output must re-parse to the same spec and value.
        e = TBoolean.expr()
        s2 = e.parseString(s)[0].spec()
        v1 = e.parseString(s)[0].value
        v2 = e.parseString(s2)[0].value
        assert s == s2
        assert v1 == v2

    roundtrip("test")
    roundtrip("-test")
| mit |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/test/test_copy_reg.py | 82 | 4260 | import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
# Fixture classes exercising the __slots__ variants that
# copy_reg._slotnames() must handle (see test_slotnames below).
class C:
    pass
class WithoutSlots(object):
    pass
class WithWeakref(object):
    # __weakref__ is implicit machinery, not a real slot name.
    __slots__ = ('__weakref__',)
class WithPrivate(object):
    # Private names get mangled to _WithPrivate__spam.
    __slots__ = ('__spam',)
class WithSingleString(object):
    # __slots__ may be a bare string instead of a sequence.
    __slots__ = 'spam'
class WithInherited(WithSingleString):
    # Slot names accumulate across the inheritance chain.
    __slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
    # NOTE: this is Python 2 code (long literal 0x80000000L below); it is
    # documented here as-is and must not be run under Python 3.
    def test_class(self):
        # Only new-style/extension types may be registered, not classic classes.
        self.assertRaises(TypeError, copy_reg.pickle,
                          C, None, None)
    def test_noncallable_reduce(self):
        self.assertRaises(TypeError, copy_reg.pickle,
                          type(1), "not a callable")
    def test_noncallable_constructor(self):
        self.assertRaises(TypeError, copy_reg.pickle,
                          type(1), int, "not a callable")
    def test_bool(self):
        import copy
        self.assertEquals(True, copy.copy(True))
    def test_extension_registry(self):
        # ExtensionSaver snapshots/restores any pre-existing registration
        # for this code so the global registry is left untouched.
        mod, func, code = 'junk1 ', ' junk2', 0xabcd
        e = ExtensionSaver(code)
        try:
            # Shouldn't be in registry now.
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func, code)
            copy_reg.add_extension(mod, func, code)
            # Should be in the registry.
            self.assert_(copy_reg._extension_registry[mod, func] == code)
            self.assert_(copy_reg._inverted_registry[code] == (mod, func))
            # Shouldn't be in the cache.
            self.assert_(code not in copy_reg._extension_cache)
            # Redundant registration should be OK.
            copy_reg.add_extension(mod, func, code)  # shouldn't blow up
            # Conflicting code.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func, code + 1)
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func, code + 1)
            # Conflicting module name.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod[1:], func, code )
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod[1:], func, code )
            # Conflicting function name.
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func[1:], code)
            self.assertRaises(ValueError, copy_reg.remove_extension,
                              mod, func[1:], code)
            # Can't remove one that isn't registered at all.
            if code + 1 not in copy_reg._inverted_registry:
                self.assertRaises(ValueError, copy_reg.remove_extension,
                                  mod[1:], func[1:], code + 1)
        finally:
            e.restore()
        # Shouldn't be there anymore.
        self.assert_((mod, func) not in copy_reg._extension_registry)
        # The code *may* be in copy_reg._extension_registry, though, if
        # we happened to pick on a registered code.  So don't check for
        # that.
        # Check valid codes at the limits.
        for code in 1, 0x7fffffff:
            e = ExtensionSaver(code)
            try:
                copy_reg.add_extension(mod, func, code)
                copy_reg.remove_extension(mod, func, code)
            finally:
                e.restore()
        # Ensure invalid codes blow up (codes must fit in a signed 32-bit int).
        for code in -1, 0, 0x80000000L:
            self.assertRaises(ValueError, copy_reg.add_extension,
                              mod, func, code)
    def test_slotnames(self):
        # Exercises the fixture classes defined at module level above.
        self.assertEquals(copy_reg._slotnames(WithoutSlots), [])
        self.assertEquals(copy_reg._slotnames(WithWeakref), [])
        expected = ['_WithPrivate__spam']
        self.assertEquals(copy_reg._slotnames(WithPrivate), expected)
        self.assertEquals(copy_reg._slotnames(WithSingleString), ['spam'])
        expected = ['eggs', 'spam']
        expected.sort()
        result = copy_reg._slotnames(WithInherited)
        result.sort()
        self.assertEquals(result, expected)
def test_main():
    # Standard CPython test-suite entry point: run all tests in the case.
    test_support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
    test_main()
| mit |
archen/django-storages | storages/backends/ftp.py | 23 | 8542 | # FTP storage class for Django pluggable storage system.
# Author: Rafal Jonca <jonca.rafal@gmail.com>
# License: MIT
# Comes from http://www.djangosnippets.org/snippets/1269/
#
# Usage:
#
# Add below to settings.py:
# FTP_STORAGE_LOCATION = '[a]ftp://<user>:<pass>@<host>:<port>/[path]'
#
# In models.py you can write:
# from FTPStorage import FTPStorage
# fs = FTPStorage()
# class FTPTest(models.Model):
# file = models.FileField(upload_to='a/b/c/', storage=fs)
import os
import ftplib
import urlparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
class FTPStorageException(Exception):
    # Raised for any FTP-level failure (connection, transfer, listing).
    pass
class FTPStorage(Storage):
    """FTP Storage class for Django pluggable storage system."""
    def __init__(self, location=settings.FTP_STORAGE_LOCATION,
                 base_url=settings.MEDIA_URL):
        # location: "[a]ftp://<user>:<pass>@<host>:<port>/[path]";
        # the "aftp" scheme selects active (non-passive) FTP mode.
        self._config = self._decode_location(location)
        self._base_url = base_url
        self._connection = None
    def _decode_location(self, location):
        """Return splitted configuration data from location."""
        splitted_url = urlparse.urlparse(location)
        config = {}
        if splitted_url.scheme not in ('ftp', 'aftp'):
            raise ImproperlyConfigured(
                'FTPStorage works only with FTP protocol!'
            )
        # NOTE(review): urlparse returns hostname=None (not '') when the
        # host is missing, so this check may never fire — verify.
        if splitted_url.hostname == '':
            raise ImproperlyConfigured('You must at least provide hostname!')
        if splitted_url.scheme == 'aftp':
            config['active'] = True
        else:
            config['active'] = False
        config['path'] = splitted_url.path
        config['host'] = splitted_url.hostname
        config['user'] = splitted_url.username
        config['passwd'] = splitted_url.password
        # NOTE(review): int(None) raises TypeError when the URL omits the
        # port, although the module header documents it as optional — verify.
        config['port'] = int(splitted_url.port)
        return config
    def _start_connection(self):
        # Check if connection is still alive and if not, drop it.
        if self._connection is not None:
            try:
                self._connection.pwd()
            except ftplib.all_errors:
                self._connection = None
        # Real reconnect
        if self._connection is None:
            ftp = ftplib.FTP()
            try:
                ftp.connect(self._config['host'], self._config['port'])
                ftp.login(self._config['user'], self._config['passwd'])
                if self._config['active']:
                    ftp.set_pasv(False)
                if self._config['path'] != '':
                    ftp.cwd(self._config['path'])
                self._connection = ftp
                return
            except ftplib.all_errors:
                raise FTPStorageException(
                    'Connection or login error using data %s'
                    % repr(self._config)
                )
    def disconnect(self):
        # Politely close the session; a later call reconnects lazily.
        self._connection.quit()
        self._connection = None
    def _mkremdirs(self, path):
        # Create each path component remotely (like mkdir -p), restoring
        # the working directory afterwards. Connection must be open.
        pwd = self._connection.pwd()
        path_splitted = path.split('/')
        for path_part in path_splitted:
            try:
                self._connection.cwd(path_part)
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # ftplib.all_errors would be the targeted choice here.
            except:
                try:
                    self._connection.mkd(path_part)
                    self._connection.cwd(path_part)
                except ftplib.all_errors:
                    raise FTPStorageException(
                        'Cannot create directory chain %s' % path
                    )
        self._connection.cwd(pwd)
        return
    def _put_file(self, name, content):
        # Connection must be open!
        try:
            self._mkremdirs(os.path.dirname(name))
            pwd = self._connection.pwd()
            self._connection.cwd(os.path.dirname(name))
            self._connection.storbinary('STOR ' + os.path.basename(name),
                                        content.file,
                                        content.DEFAULT_CHUNK_SIZE)
            self._connection.cwd(pwd)
        except ftplib.all_errors:
            raise FTPStorageException('Error writing file %s' % name)
    def _open(self, name, mode='rb'):
        # Defer the actual download to FTPStorageFile's lazy read().
        remote_file = FTPStorageFile(name, self, mode=mode)
        return remote_file
    def _read(self, name):
        # Download the remote file into an in-memory buffer and return it.
        # Connection must be open.
        memory_file = StringIO()
        try:
            pwd = self._connection.pwd()
            self._connection.cwd(os.path.dirname(name))
            self._connection.retrbinary('RETR ' + os.path.basename(name),
                                        memory_file.write)
            self._connection.cwd(pwd)
            return memory_file
        except ftplib.all_errors:
            raise FTPStorageException('Error reading file %s' % name)
    def _save(self, name, content):
        content.open()
        self._start_connection()
        self._put_file(name, content)
        content.close()
        return name
    def _get_dir_details(self, path):
        # Parse a LIST response into ({dirname: 0}, {filename: size}).
        # Connection must be open!
        try:
            lines = []
            self._connection.retrlines('LIST ' + path, lines.append)
            dirs = {}
            files = {}
            for line in lines:
                words = line.split()
                if len(words) < 6:
                    continue
                # Skip symlink entries ("name -> target").
                if words[-2] == '->':
                    continue
                if words[0][0] == 'd':
                    dirs[words[-1]] = 0
                elif words[0][0] == '-':
                    # Unix-style listing assumed: size is the 5th-from-last
                    # field — TODO confirm against the target server.
                    files[words[-1]] = int(words[-5])
            return dirs, files
        except ftplib.all_errors:
            raise FTPStorageException('Error getting listing for %s' % path)
    def listdir(self, path):
        self._start_connection()
        try:
            dirs, files = self._get_dir_details(path)
            return dirs.keys(), files.keys()
        except FTPStorageException:
            raise
    def delete(self, name):
        if not self.exists(name):
            return
        self._start_connection()
        try:
            self._connection.delete(name)
        except ftplib.all_errors:
            raise FTPStorageException('Error when removing %s' % name)
    def exists(self, name):
        self._start_connection()
        try:
            if os.path.basename(name) in self._connection.nlst(
                os.path.dirname(name) + '/'
            ):
                return True
            else:
                return False
        except ftplib.error_temp:
            return False
        except ftplib.error_perm:
            # error_perm: 550 Can't find file
            return False
        except ftplib.all_errors:
            raise FTPStorageException('Error when testing existence of %s'
                                      % name)
    def size(self, name):
        # Best-effort: returns 0 when the size cannot be determined.
        self._start_connection()
        try:
            dirs, files = self._get_dir_details(os.path.dirname(name))
            if os.path.basename(name) in files:
                return files[os.path.basename(name)]
            else:
                return 0
        except FTPStorageException:
            return 0
    def url(self, name):
        if self._base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        return urlparse.urljoin(self._base_url, name).replace('\\', '/')
class FTPStorageFile(File):
    """A Django File backed by a remote FTP file.

    Reads are lazy: the remote content is downloaded into an in-memory
    buffer on the first read(). Writes replace the buffer and are uploaded
    to the server when close() is called.
    """
    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        self._is_read = False

    @property
    def size(self):
        # Lazily fetch (and cache) the remote file size.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size

    def read(self, num_bytes=None):
        if not self._is_read:
            self._storage._start_connection()
            self.file = self._storage._read(self._name)
            # Bug fix: FTPStorage defines no _end_connection() method, so the
            # original call here raised AttributeError on the first read.
            # Use disconnect(), mirroring what close() does after an upload.
            self._storage.disconnect()
            self._is_read = True
        return self.file.read(num_bytes)

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        # Replace the buffer wholesale; the upload happens in close().
        self.file = StringIO(content)
        self._is_dirty = True
        self._is_read = True

    def close(self):
        if self._is_dirty:
            # Flush the buffered content to the server, then drop the session.
            self._storage._start_connection()
            self._storage._put_file(self._name, self)
            self._storage.disconnect()
        self.file.close()
| bsd-3-clause |
zzicewind/nova | nova/cmd/xvpvncproxy.py | 50 | 1118 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XVP VNC Console Proxy Server."""
import sys
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova import config
from nova import service
from nova import version
from nova.vnc import xvp_proxy
def main():
    # Standard nova service bootstrap: parse CLI/config first, then set up
    # logging before anything else can emit log records.
    config.parse_args(sys.argv)
    logging.setup(config.CONF, "nova")
    # Enable oslo.reports Guru Meditation error reports for this process.
    gmr.TextGuruMeditation.setup_autorun(version)
    # Build the XVP VNC proxy WSGI server, serve it, and block until exit.
    wsgi_server = xvp_proxy.get_wsgi_server()
    service.serve(wsgi_server)
    service.wait()
haeusser/tensorflow | tensorflow/contrib/graph_editor/util.py | 18 | 16833 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility funtions for the graph_editor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from six import iteritems
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.ops import array_ops as tf_array_ops
__all__ = [
"make_list_of_op",
"get_tensors",
"make_list_of_t",
"get_generating_ops",
"get_consuming_ops",
"ControlOutputs",
"placeholder_name",
"make_placeholder_from_tensor",
"make_placeholder_from_dtype_and_shape",
]
def concatenate_unique(la, lb):
  """Append to `la` every element of `lb` not already present.

  `la` is mutated in place and also returned; repeated elements of `lb`
  are added at most once.
  """
  for candidate in lb:
    if candidate in la:
      continue
    la.append(candidate)
  return la
# TODO(fkp): very generic code, it should be moved in a more generic place.
class ListView(object):
  """Immutable list wrapper.

  This class is strongly inspired by the one in tf.Operation.
  """

  def __init__(self, list_):
    if not isinstance(list_, list):
      raise TypeError("Expected a list, got: {}.".format(type(list_)))
    self._list = list_

  def __iter__(self):
    return iter(self._list)

  def __len__(self):
    return len(self._list)

  def __bool__(self):
    return bool(self._list)

  # Python 3 wants __bool__, Python 2.7 wants __nonzero__
  __nonzero__ = __bool__

  def __getitem__(self, i):
    return self._list[i]

  def __add__(self, other):
    # Concatenation always yields a plain list, coercing `other` if needed.
    other_as_list = other if isinstance(other, list) else list(other)
    return list(self) + other_as_list
# TODO(fkp): very generic code, it should be moved in a more generic place.
def is_iterable(obj):
  """Return true if the object is iterable."""
  try:
    iter(obj)
  except Exception:  # pylint: disable=broad-except
    # iter() raised, so the object does not support iteration.
    return False
  return True
def flatten_tree(tree, leaves=None):
  """Flatten a tree into a list.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    leaves: list to which the tree leaves are appended (None by default).
  Returns:
    A list of all the leaves in the tree.
  """
  if leaves is None:
    leaves = []
  if isinstance(tree, dict):
    # Only the values are treated as subtrees; keys are structure.
    # Uses dict.items() directly (works on both Python 2 and 3) instead of
    # six.iteritems, removing the only external dependency of this helper.
    for _, child in tree.items():
      flatten_tree(child, leaves)
  elif is_iterable(tree):
    for child in tree:
      flatten_tree(child, leaves)
  else:
    # Non-iterable node: it is a leaf.
    leaves.append(tree)
  return leaves
def transform_tree(tree, fn, iterable_type=tuple):
  """Transform all the nodes of a tree.

  Args:
    tree: iterable or not. If iterable, its elements (child) can also be
      iterable or not.
    fn: function to apply to each leaf.
    iterable_type: type used to construct the resulting tree for unknown
      iterable, typically `list` or `tuple`.
  Returns:
    A tree whose leaves have been transformed by `fn`.
    The hierarchy of the output tree mimics the one of the input tree.
  """
  if is_iterable(tree):
    if isinstance(tree, dict):
      # Rebuild the same mapping type via __new__/__init__, transforming
      # only the values; keys are preserved as-is.
      res = tree.__new__(type(tree))
      res.__init__(
          (k, transform_tree(child, fn)) for k, child in iteritems(tree))
      return res
    elif isinstance(tree, tuple):
      # NamedTuple? (_asdict is the conventional marker for namedtuples)
      if hasattr(tree, "_asdict"):
        res = tree.__new__(type(tree), **transform_tree(tree._asdict(), fn))
      else:
        res = tree.__new__(type(tree),
                           (transform_tree(child, fn) for child in tree))
      return res
    # NOTE(review): collections.Sequence is the pre-3.3 alias; it moved to
    # collections.abc and was removed in Python 3.10 — confirm target version.
    elif isinstance(tree, collections.Sequence):
      res = tree.__new__(type(tree))
      res.__init__(transform_tree(child, fn) for child in tree)
      return res
    else:
      # Unknown iterable (e.g. a generator): rebuild as iterable_type.
      return iterable_type(transform_tree(child, fn) for child in tree)
  else:
    # Non-iterable node: it is a leaf, so apply fn.
    return fn(tree)
def check_graphs(*args):
  """Check that all the element in args belong to the same graph.

  Args:
    *args: a list of object with a obj.graph property.
  Raises:
    ValueError: if all the elements do not belong to the same graph.
  """
  reference_graph = None
  for index, sgv in enumerate(args):
    current_graph = sgv.graph
    if current_graph is None:
      # Elements without a graph are ignored.
      continue
    if reference_graph is None:
      # First element with a graph establishes the reference.
      reference_graph = current_graph
    elif current_graph is not reference_graph:
      raise ValueError("Argument[{}]: Wrong graph!".format(index))
def get_unique_graph(tops, check_types=None, none_if_empty=False):
    """Return the unique graph used by all the elements in tops.

    Args:
        tops: list of elements to check (usually a list of tf.Operation and/or
            tf.Tensor). Or a tf.Graph.
        check_types: check that the elements in tops are of given type(s). If
            None, the types (tf.Operation, tf.Tensor) are used.
        none_if_empty: don't raise an error if tops is an empty list, just
            return None.
    Returns:
        The unique graph used by all the tops.
    Raises:
        TypeError: if tops is not an iterable of tf.Operation.
        ValueError: if the graph is not unique.
    """
    if isinstance(tops, tf_ops.Graph):
        return tops
    if not is_iterable(tops):
        raise TypeError("{} is not iterable".format(type(tops)))
    # Normalize check_types into a tuple of acceptable types.
    if check_types is None:
        check_types = (tf_ops.Operation, tf_ops.Tensor)
    elif not is_iterable(check_types):
        check_types = (check_types,)
    graph = None
    for top in tops:
        if not isinstance(top, check_types):
            raise TypeError("Expected a type in ({}), got: {}".format(
                ", ".join([str(t) for t in check_types]), type(top)))
        if graph is None:
            graph = top.graph
        elif top.graph is not graph:
            raise ValueError("Operation {} does not belong to given graph".format(top))
    if graph is None and not none_if_empty:
        raise ValueError("Can't find the unique graph of an empty list")
    return graph
def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False):
    """Convert ops to a list of `tf.Operation`.

    Args:
        ops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single
            operation.
        check_graph: if `True` check if all the operations belong to the same
            graph.
        allow_graph: if `False` a `tf.Graph` cannot be converted.
        ignore_ts: if True, silently ignore `tf.Tensor`.
    Returns:
        A newly created list of `tf.Operation`.
    Raises:
        TypeError: if ops cannot be converted to a list of `tf.Operation` or,
            if `check_graph` is `True`, if all the ops do not belong to the
            same graph.
    """
    if isinstance(ops, tf_ops.Graph):
        if not allow_graph:
            raise TypeError("allow_graph is False: cannot convert a tf.Graph.")
        # A graph converts to the list of all its operations.
        return ops.get_operations()
    # Wrap a single operation into a list.
    if not is_iterable(ops):
        ops = [ops]
    if not ops:
        return []
    if check_graph:
        # Tensors are tolerated in the type check only when ignore_ts is set.
        get_unique_graph(ops, check_types=None if ignore_ts else tf_ops.Operation)
    return [op for op in ops if isinstance(op, tf_ops.Operation)]
# TODO(fkp): move this function in tf.Graph?
def get_tensors(graph):
    """Get all the tensors which are input or output of an op in the graph.

    Args:
        graph: a `tf.Graph`.
    Returns:
        A list of `tf.Tensor`.
    Raises:
        TypeError: if graph is not a `tf.Graph`.
    """
    if not isinstance(graph, tf_ops.Graph):
        raise TypeError("Expected a graph, got: {}".format(type(graph)))
    # Every tensor in a graph is the output of exactly one op, so collecting
    # op.outputs over all ops enumerates them all.
    return [tensor for op in graph.get_operations() for tensor in op.outputs]
def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):
    """Convert ts to a list of `tf.Tensor`.

    Args:
        ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.
        check_graph: if `True` check if all the tensors belong to the same
            graph.
        allow_graph: if `False` a `tf.Graph` cannot be converted.
        ignore_ops: if `True`, silently ignore `tf.Operation`.
    Returns:
        A newly created list of `tf.Tensor`.
    Raises:
        TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,
            if `check_graph` is `True`, if all the ops do not belong to the
            same graph.
    """
    if isinstance(ts, tf_ops.Graph):
        if not allow_graph:
            raise TypeError("allow_graph is False: cannot convert a tf.Graph.")
        # A graph converts to the list of all its tensors.
        return get_tensors(ts)
    # Wrap a single tensor into a list.
    if not is_iterable(ts):
        ts = [ts]
    if not ts:
        return []
    if check_graph:
        # Operations are tolerated in the type check only when ignore_ops is set.
        get_unique_graph(ts, check_types=None if ignore_ops else tf_ops.Tensor)
    return [t for t in ts if isinstance(t, tf_ops.Tensor)]
def get_generating_ops(ts):
    """Return all the generating ops of the tensors in `ts`.

    Args:
        ts: a list of `tf.Tensor`
    Returns:
        A list of all the generating `tf.Operation` of the tensors in `ts`.
    Raises:
        TypeError: if `ts` cannot be converted to a list of `tf.Tensor`.
    """
    tensors = make_list_of_t(ts, allow_graph=False)
    # Each tensor is produced by exactly one op.
    return [tensor.op for tensor in tensors]
def get_consuming_ops(ts):
    """Return all the consuming ops of the tensors in ts.

    Args:
        ts: a list of `tf.Tensor`
    Returns:
        A list of all the consuming `tf.Operation` of the tensors in `ts`,
        deduplicated, in first-encounter order.
    Raises:
        TypeError: if ts cannot be converted to a list of `tf.Tensor`.
    """
    consumers = []
    for tensor in make_list_of_t(ts, allow_graph=False):
        for consumer in tensor.consumers():
            # Keep order while avoiding duplicates.
            if consumer not in consumers:
                consumers.append(consumer)
    return consumers
class ControlOutputs(object):
    """The control outputs topology.

    Maps each operation to the list of ops that name it as a control input.
    """

    def __init__(self, graph):
        """Create a dictionary of control-output dependencies.

        Args:
            graph: a `tf.Graph`.
        Raises:
            TypeError: graph is not a `tf.Graph`.
        """
        if not isinstance(graph, tf_ops.Graph):
            raise TypeError("Expected a tf.Graph, got: {}".format(type(graph)))
        self._control_outputs = {}
        self._graph = graph
        self._version = None
        self._build()

    def update(self):
        """Update the control outputs if the graph has changed."""
        if self._graph.version != self._version:
            self._build()
        return self

    def _build(self):
        """Build the control outputs dictionary from the current graph state."""
        self._control_outputs.clear()
        for operation in self._graph.get_operations():
            for ctl_input in operation.control_inputs:
                # Invert the dependency: ctl_input -> [ops depending on it].
                dependents = self._control_outputs.setdefault(ctl_input, [])
                if operation not in dependents:
                    dependents.append(operation)
        # Remember the graph version so update() can detect staleness.
        self._version = self._graph.version

    def get_all(self):
        """Return the whole control-output dictionary."""
        return self._control_outputs

    def get(self, op):
        """Return the control outputs of op (empty tuple if it has none)."""
        return self._control_outputs.get(op, ())

    @property
    def graph(self):
        return self._graph
def scope_finalize(scope):
    """Return scope with a trailing slash appended if it lacks one.

    The empty scope ("" — the root scope) is returned unchanged.
    """
    return scope + "/" if scope and not scope.endswith("/") else scope
def scope_dirname(scope):
    """Return everything up to and including the last '/' of scope ("" if none)."""
    head, sep, _ = scope.rpartition("/")
    return head + sep
def scope_basename(scope):
    """Return the part of scope after the last '/' (the whole scope if none)."""
    return scope.rpartition("/")[2]
def placeholder_name(t=None, scope=None):
    """Create placeholder name for the graph editor.

    Args:
        t: optional tensor on which the placeholder operation's name will be
            based.
        scope: absolute scope with which to prefix the placeholder's name. None
            means that the scope of t is preserved. "" means the root scope.
    Returns:
        A new placeholder name prefixed by "geph". Note that "geph" stands for
        Graph Editor PlaceHolder. This convention allows to quickly identify
        the placeholder generated by the Graph Editor.
    Raises:
        TypeError: if t is not None or a tf.Tensor.
    """
    if scope is not None:
        scope = scope_finalize(scope)
    if t is not None:
        if not isinstance(t, tf_ops.Tensor):
            # Bug fix: the message used to read "tf.Tenfor".
            raise TypeError("Expected a tf.Tensor, got: {}".format(type(t)))
        op_dirname = scope_dirname(t.op.name)
        op_basename = scope_basename(t.op.name)
        if scope is None:
            # Preserve the scope of the source tensor's op.
            scope = op_dirname
        if op_basename.startswith("geph__"):
            # Already a Graph Editor placeholder name: reuse it as-is.
            ph_name = op_basename
        else:
            ph_name = "geph__{}_{}".format(op_basename, t.value_index)
        return scope + ph_name
    else:
        # No tensor given: emit the generic placeholder name in the scope.
        if scope is None:
            scope = ""
        return scope + "geph"
def make_placeholder_from_tensor(t, scope=None):
    """Create a `tf.placeholder` for the Graph Editor.

    Note that the correct graph scope must be set by the calling function.

    Args:
        t: a `tf.Tensor` whose name will be used to create the placeholder
            (see function placeholder_name).
        scope: absolute scope within which to create the placeholder. None
            means that the scope of `t` is preserved. `""` means the root
            scope.
    Returns:
        A newly created `tf.placeholder`.
    Raises:
        TypeError: if `t` is not `None` or a `tf.Tensor`.
    """
    ph_name = placeholder_name(t, scope=scope)
    # The placeholder mirrors the dtype and (possibly partial) shape of t.
    return tf_array_ops.placeholder(dtype=t.dtype, shape=t.get_shape(),
                                    name=ph_name)
def make_placeholder_from_dtype_and_shape(dtype, shape=None, scope=None):
    """Create a tf.placeholder for the Graph Editor.

    Note that the correct graph scope must be set by the calling function.
    The placeholder is named using the function placeholder_name (with no
    tensor argument).

    Args:
        dtype: the tensor type.
        shape: the tensor shape (optional).
        scope: absolute scope within which to create the placeholder. None
            means that the scope of t is preserved. "" means the root scope.
    Returns:
        A newly created tf.placeholder.
    """
    ph_name = placeholder_name(scope=scope)
    return tf_array_ops.placeholder(dtype=dtype, shape=shape, name=ph_name)
# Matches "dunder" attribute names (e.g. "__doc__") so they can be skipped
# when enumerating the public GraphKeys collection names below.
_INTERNAL_VARIABLE_RE = re.compile(r"^__\w+__$")


def get_predefined_collection_names():
    """Return all the predefined collection names."""
    return [getattr(tf_ops.GraphKeys, key) for key in dir(tf_ops.GraphKeys)
            if not _INTERNAL_VARIABLE_RE.match(key)]
def find_corresponding_elem(target, dst_graph, dst_scope="", src_scope=""):
    """Find corresponding op/tensor in a different graph.

    Args:
        target: A `tf.Tensor` or a `tf.Operation` belonging to the original
            graph.
        dst_graph: The graph in which the corresponding graph element must be
            found.
        dst_scope: A scope which is prepended to the name to look for.
        src_scope: A scope which is removed from the original of `target` name.
    Returns:
        The corresponding `tf.Tensor` or a `tf.Operation`.
    Raises:
        ValueError: if `src_name` does not start with `src_scope`.
        TypeError: if `target` is not a `tf.Tensor` or a `tf.Operation`
        KeyError: If the corresponding graph element cannot be found.
    """
    src_name = target.name
    if src_scope:
        src_scope = scope_finalize(src_scope)
        # Bug fix: this used to call the non-existent str.startswidth, which
        # raised AttributeError whenever src_scope was supplied.
        if not src_name.startswith(src_scope):
            raise ValueError("{} does not start with {}".format(src_name, src_scope))
        src_name = src_name[len(src_scope):]
    dst_name = src_name
    if dst_scope:
        dst_scope = scope_finalize(dst_scope)
        dst_name = dst_scope + dst_name
    if isinstance(target, tf_ops.Tensor):
        return dst_graph.get_tensor_by_name(dst_name)
    if isinstance(target, tf_ops.Operation):
        return dst_graph.get_operation_by_name(dst_name)
    # Bug fix: the message is now actually formatted instead of passing the
    # type as a second (ignored) exception argument.
    raise TypeError("Expected tf.Tensor or tf.Operation, got: {}".format(type(target)))
def find_corresponding(targets, dst_graph, dst_scope="", src_scope=""):
    """Find corresponding ops/tensors in a different graph.

    `targets` is a Python tree, that is, a nested structure of iterables
    (list, tuple, dictionary) whose leaves are instances of
    `tf.Tensor` or `tf.Operation`.

    Args:
        targets: A Python tree containing `tf.Tensor` or `tf.Operation`
            belonging to the original graph.
        dst_graph: The graph in which the corresponding graph element must be
            found.
        dst_scope: A scope which is prepended to the name to look for.
        src_scope: A scope which is removed from the original of `top` name.
    Returns:
        A Python tree containing the corresponding `tf.Tensor` or
        `tf.Operation`.
    Raises:
        ValueError: if `src_name` does not start with `src_scope`.
        TypeError: if `top` is not a `tf.Tensor` or a `tf.Operation`
        KeyError: If the corresponding graph element cannot be found.
    """
    def _map_leaf(top):
        # Resolve a single leaf in the destination graph.
        return find_corresponding_elem(top, dst_graph, dst_scope, src_scope)

    return transform_tree(targets, _map_leaf)
| apache-2.0 |
jj0hns0n/geonode | geonode/contrib/metadataxsl/management/commands/addmissinglinks.py | 23 | 1771 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.base.models import Link, ResourceBase
from geonode.contrib.metadataxsl.models import add_xsl_link
class Command(BaseCommand):
    help = 'Add missing links to ISO XSL metadata service'

    def handle(self, *args, **options):
        """Scan all resources and add the XSL metadata link to each one that
        already exposes an ISO metadata link."""
        for resource in ResourceBase.objects.all():
            print('Checking resource with id {}'.format(resource.id))
            # check ISO link exists
            isolink = Link.objects.filter(
                resource_id=resource.id, link_type='metadata', name='ISO')
            # Idiom fix: use QuerySet.exists() instead of truthiness on the
            # queryset (avoids fetching all rows just for the boolean check).
            if isolink.exists():
                print(' ISO link found for resource {} "{}"'.format(resource.id, resource.title))
                created = add_xsl_link(resource)
                if created:
                    print(' XSL link created')
            else:
                print(' ISO link NOT found for resource {} "{}"'.format(resource.id, resource.title))
| gpl-3.0 |
michaelBenin/geonode | geonode/search/tests.py | 1 | 15128 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test.client import Client
from django.test import TestCase
from django.core.urlresolvers import reverse
from geonode.security.enumerations import AUTHENTICATED_USERS, ANONYMOUS_USERS
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.people.models import Profile
from geonode.search import search
from geonode.search import util
from geonode.search.populate_search_test_data import create_models
from geonode.search.query import query_from_request
from agon_ratings.models import OverallRating
import json
import logging
# quack: minimal stand-in for a Django request — a class exposing only the
# REQUEST dict of GET/POST params and an anonymous (None) user.
def MockRequest(**kw):
    """Return a request-like class whose REQUEST attribute holds `kw`.

    Converted from a lambda assignment (PEP 8 / E731) to a def; behavior and
    call signature are unchanged.
    """
    return type('xyz', (object,), {'REQUEST': kw, 'user': None})
def all_public():
    '''ensure all layers, maps and documents are publicly viewable'''
    # Same order as before: layers, then maps, then documents.
    for queryset in (Layer.objects.all(), Map.objects.all(),
                     Document.objects.all()):
        for resource in queryset:
            resource.set_default_permissions()
class searchTest(TestCase):
    """End-to-end tests for the /search/api endpoint.

    Fixtures and model instances are created once per class (setUpClass)
    for speed; the per-test fixture hooks are turned into no-ops.
    """

    c = Client()
    fixtures = ['initial_data.json']

    @classmethod
    def setUpClass(cls):
        "Hook method for setting up class fixture before running tests in the class."
        from django.core.cache import cache
        cache.clear()
        # Run the (normally per-test) fixture setup exactly once.
        searchTest('_fixture_setup')._fixture_setup(True)
        create_models()
        all_public()

    @classmethod
    def tearDownClass(cls):
        "Hook method for deconstructing the class fixture after running all tests in the class."
        searchTest('_fixture_teardown')._fixture_teardown(True)
        logging.getLogger('south').setLevel(logging.DEBUG)

    def _fixture_setup(self, a=False):
        # No-op unless called explicitly from setUpClass with a=True.
        if a:
            super(searchTest, self)._fixture_setup()

    def _fixture_teardown(self, a=False):
        # Mirror of _fixture_setup: skip per-test teardown.
        if a:
            super(searchTest, self)._fixture_teardown()

    def request(self, query=None, **options):
        # GET /search/api with optional q=query plus any extra GET params.
        query_dict = dict(q=query) if query else {}
        get_params = dict(query_dict, **options)
        return self.c.get('/search/api', get_params)

    def assert_results_contain_title(self, jsonvalue, title, _type=None):
        # NOTE(review): because a lambda body swallows the trailing
        # conditional expression, when _type is not None this matcher returns
        # a (truthy) inner lambda for every doc, so the _type filter has no
        # effect — confirm whether the parenthesization is intended.
        matcher = (lambda doc: doc['title'] == title if _type is None else
                   lambda doc: doc['title'] == title and doc['_type'] == _type)
        matches = filter(matcher, jsonvalue['results'])
        self.assertTrue(matches, "No results match %s" % title)

    def search_assert(self, response, **options):
        """Decode a search API response and check the expectations passed as
        keyword options (contains_maptitle, contains_layertitle,
        contains_username, n_results, n_total, first_title, sorted_by).
        Unrecognized options are silently ignored."""
        jsonvalue = json.loads(response.content)
        facets = jsonvalue['facets']
        if 'layer' in facets:
            # Raster + vector layer facets must add up to the layer facet.
            self.assertEquals(facets['raster'] + facets['vector'], facets['layer'])
        # import pprint; pprint.pprint(jsonvalue)
        self.assertFalse(jsonvalue.get('errors'))
        self.assertTrue(jsonvalue.get('success'))
        contains_maptitle = options.pop('contains_maptitle', None)
        if contains_maptitle:
            self.assert_results_contain_title(jsonvalue, contains_maptitle, 'map')
        contains_layertitle = options.pop('contains_layertitle', None)
        if contains_layertitle:
            self.assert_results_contain_title(jsonvalue, contains_layertitle, 'layer')
        contains_username = options.pop('contains_username', None)
        if contains_username:
            self.assert_results_contain_title(jsonvalue, contains_username, 'user')
        n_results = options.pop('n_results', None)
        if n_results:
            self.assertEquals(n_results, len(jsonvalue['results']))
        n_total = options.pop('n_total', None)
        if n_total:
            self.assertEquals(n_total, jsonvalue['total'])
        first_title = options.pop('first_title', None)
        if first_title:
            self.assertTrue(len(jsonvalue['results']) > 0, 'No results found')
            doc = jsonvalue['results'][0]
            self.assertEquals(first_title, doc['title'])
        sorted_by = options.pop('sorted_by', None)
        if sorted_by:
            # A leading '-' means descending order. NOTE: this local name
            # shadows the builtin `reversed` within the method.
            reversed = sorted_by[0] == '-'
            sorted_by = sorted_by.replace('-','')
            sorted_fields = [ jv[sorted_by] for jv in jsonvalue['results'] ]
            expected = list(sorted_fields)
            expected.sort(reverse = reversed)
            self.assertEquals(sorted_fields, expected)

    def test_limit(self):
        self.search_assert(self.request(limit=1), n_results=1)

    def test_query_map_title(self):
        self.search_assert(self.request('unique'), contains_maptitle='map one')

    def test_query_layer_title(self):
        # NOTE(review): 'contains_layerid' is not an option search_assert
        # recognizes (it pops 'contains_layertitle'), so this expectation is
        # silently ignored — confirm whether it should be contains_layertitle.
        self.search_assert(self.request('uniquetitle'),
                           contains_layerid='uniquetitle')

    def test_username(self):
        self.search_assert(self.request('jblaze'), contains_username='jblaze')

    def test_profile(self):
        # Profile free-text fields should also match the owning user.
        self.search_assert(self.request("some other information"),
                           contains_username='jblaze')

    def test_text_across_types(self):
        self.search_assert(self.request('foo'), n_results=8, n_total=8)
        self.search_assert(self.request('common'), n_results=10, n_total=22)

    def test_pagination(self):
        self.search_assert(self.request('common', start=0), n_results=10, n_total=22)
        self.search_assert(self.request('common', start=10), n_results=10, n_total=22)
        self.search_assert(self.request('common', start=20), n_results=2, n_total=22)

    def test_bbox_query(self):
        # @todo since maps and users are excluded at the moment, this will have
        # to be revisited
        self.search_assert(self.request(extent='-180,180,-90,90', limit=None), n_results=26, n_total=26)
        self.search_assert(self.request(extent='0,10,0,10', limit=None), n_results=7)
        self.search_assert(self.request(extent='0,1,0,1', limit=None), n_results=2)

    def test_bbox_result(self):
        # grab one and set the bounds
        lyr = Layer.objects.all()[0]
        lyr.bbox_x0 = -100
        lyr.bbox_x1 = -90
        lyr.bbox_y0 = 38
        lyr.bbox_y1 = 40
        lyr.save()
        response = json.loads(self.request(lyr.title,type='layer').content)
        self.assertEquals({u'minx': u'-100', u'miny': u'38', u'maxx': u'-90', u'maxy': u'40'},
                          response['results'][0]['bbox'])

    def test_date_query(self):
        # Closed, open-start and open-end temporal ranges.
        self.search_assert(self.request(period='1980-01-01T00:00:00Z,1995-01-01T00:00:00Z'),
                           n_results=3)
        self.search_assert(self.request(period=',1995-01-01T00:00:00Z'),
                           n_results=7)
        self.search_assert(self.request(period='1980-01-01T00:00:00Z,'),
                           n_results=10, n_total=22)

    def test_errors(self):
        # Each malformed query parameter must yield its specific error text.
        self.assert_error(self.request(sort='foo'),
                          "valid sorting values are: ['alphaaz', 'newest', 'popularity', 'alphaza', 'none', 'rel', 'oldest']")
        self.assert_error(self.request(extent='1,2,3'),
                          'extent filter must contain x0,x1,y0,y1 comma separated')
        self.assert_error(self.request(extent='a,b,c,d'),
                          'extent filter must contain x0,x1,y0,y1 comma separated')
        self.assert_error(self.request(start='x'),
                          'startIndex must be valid number')
        self.assert_error(self.request(limit='x'),
                          'limit must be valid number')
        self.assert_error(self.request(added='x'),
                          'valid added filter values are: today,week,month')

    def assert_error(self, resp, msg):
        # Assert the API reported failure with msg as its first error.
        obj = json.loads(resp.content)
        self.assertTrue(obj['success'] == False)
        self.assertEquals(msg, obj['errors'][0])

    def test_sort(self):
        self.search_assert(self.request('foo', sort='newest',type='layer'),
                           first_title='common blar', sorted_by='-last_modified')
        self.search_assert(self.request('foo', sort='oldest',type='layer'),
                           first_title='common double time', sorted_by='last_modified')
        self.search_assert(self.request('foo', sort='alphaaz'),
                           first_title='bar baz', sorted_by='title')
        self.search_assert(self.request('foo', sort='alphaza'),
                           first_title='uniquefirst foo', sorted_by='-title')
        # apply some ratings
        ct = ContentType.objects.get_for_model(Layer)
        for l in Layer.objects.all():
            OverallRating.objects.create(content_type=ct, object_id=l.pk, rating=l.pk, category=3)
        ct = ContentType.objects.get_for_model(Map)
        for l in Map.objects.all():
            OverallRating.objects.create(content_type=ct, object_id=l.pk, rating=l.pk, category=1)
        # clear any cached ratings
        from django.core.cache import cache
        cache.clear()
        self.search_assert(self.request('foo', sort='popularity'),
                           first_title='common double time', sorted_by='-rating')

    def test_keywords(self):
        # this tests the matching of the general query to keywords
        self.search_assert(self.request('populartag'), n_results=10, n_total=26)
        self.search_assert(self.request('maptagunique'), n_results=1, n_total=1)
        self.search_assert(self.request('layertagunique'), n_results=1, n_total=1)
        # verify little chunks must entirely match keywords
        # po ma la are the prefixes to the former keywords :)
        self.search_assert(self.request('po ma la'), n_results=0, n_total=0)

    def test_type_query(self):
        self.search_assert(self.request('common', type='map'), n_results=8, n_total=8)
        self.search_assert(self.request('common', type='layer'), n_results=5, n_total=5)
        self.search_assert(self.request('common', type='document'), n_results=9, n_total=9)
        self.search_assert(self.request('foo', type='user'), n_results=4, n_total=4)
        # there are 8 total layers, half vector, half raster
        self.search_assert(self.request('', type='raster'), n_results=4, n_total=4)
        self.search_assert(self.request('', type='vector'), n_results=4, n_total=4)

    def test_kw_query(self):
        # a kw-only query should filter out those not matching the keyword
        self.search_assert(self.request('', kw='here', type='layer'), n_results=1, n_total=1)
        # no matches
        self.search_assert(self.request('', kw='foobar', type='layer'), n_results=0, n_total=0)

    def test_exclude_query(self):
        # exclude one layer
        self.search_assert(self.request('', exclude='CA'), n_results=10, n_total=32)
        # exclude one general word
        self.search_assert(self.request('', exclude='common'), n_results=10, n_total=28)
        # exclude more than one word
        self.search_assert(self.request('', exclude='common,something'), n_results=10, n_total=24)
        # exclude almost everything
        self.search_assert(self.request('', exclude='common,something,ipsum,quux,morx,one'), n_results=10, n_total=11)

    def test_category_search(self):
        #search no categories
        self.search_assert(self.request('', category=''), n_results=10, n_total=33)
        #search, one category
        self.search_assert(self.request('', category='location'), n_results=9, n_total=9)
        # search two categories
        self.search_assert(self.request('', category='location,biota'), n_results=10, n_total=17)
        # search with all three categories
        self.search_assert(self.request('', category='location,biota,elevation'), n_results=10, n_total=26)

    def test_author_endpoint(self):
        resp = self.c.get('/search/api/authors')
        jsobj = json.loads(resp.content)
        self.assertEquals(7, jsobj['total'])

    def test_search_page(self):
        from django.core.cache import cache
        cache.clear()
        resp = self.c.get(reverse('search'))
        self.assertEquals(200, resp.status_code)

    def test_util(self):
        # Round-trip an ISO date string through the julian-date helpers.
        jdate = util.iso_str_to_jdate('-5000-01-01T12:00:00Z')
        self.assertEquals(jdate, -105192)
        roundtripped = util.jdate_to_approx_iso_str(jdate)
        self.assertEquals(roundtripped, '-4999-01-03')

    def test_security_trimming(self):
        try:
            self.run_security_trimming()
        finally:
            # Always restore public permissions so other tests are unaffected.
            all_public()

    def run_security_trimming(self):
        # remove permissions on all jblaze layers
        jblaze_layers = Layer.objects.filter(owner__username='jblaze')
        hiding = jblaze_layers.count()
        for l in jblaze_layers:
            l.set_gen_level(ANONYMOUS_USERS, l.LEVEL_NONE)
            l.set_gen_level(AUTHENTICATED_USERS, l.LEVEL_NONE)
        # a (anonymous) layer query should exclude the number of hiding layers
        self.search_assert(self.request(type='layer'), n_results=8 - hiding, n_total=8 - hiding)
        # admin sees all
        self.assertTrue(self.c.login(username='admin', password='admin'))
        self.search_assert(self.request(type='layer'), n_results=8, n_total=8)
        self.c.logout()
        # a logged in jblaze will see his, too
        jblaze = User.objects.get(username='jblaze')
        jblaze.set_password('passwd')
        jblaze.save()
        self.assertTrue(self.c.login(username='jblaze', password='passwd'))
        self.search_assert(self.request(type='layer'), n_results=8, n_total=8)
        self.c.logout()

    def test_relevance(self):
        query = query_from_request(MockRequest(q='foo'), {})

        def assert_rules(rules):
            # Build the rank rules and check each one is reflected in the
            # generated relevance SQL.
            rank_rules = []
            for model, model_rules in rules:
                rank_rules.extend(search._rank_rules(model, *model_rules))
            sql = search._add_relevance(query, rank_rules)
            for _, model_rules in rules:
                for attr, rank1, rank2 in model_rules:
                    self.assertTrue(('THEN %d ELSE 0' % rank1) in sql)
                    self.assertTrue(('THEN %d ELSE 0' % rank2) in sql)
                    self.assertTrue(attr in sql)

        assert_rules([(Map, [('title', 10, 5), ('abstract', 5, 2)])])
        assert_rules([(Layer,
                       [('name', 10, 1), ('title', 10, 5), ('abstract', 5, 2)])])
        assert_rules([(User, [('username', 10, 5)]),
                      (Profile, [('organization', 5, 2)])])
| gpl-3.0 |
HonzaKral/elasticsearch | dev-tools/smoke_test_rc.py | 8 | 11742 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies it's sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins try run
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
import argparse
import tempfile
import os
from os.path import basename, dirname, isdir, join
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from http.client import HTTPConnection
def find_official_plugins():
    """Return the names of all plugin directories bundled in the source tree.

    Looks in the 'plugins' directory next to this script's parent directory.
    """
    plugins_dir = join(dirname(dirname(__file__)), 'plugins')
    return [entry for entry in os.listdir(plugins_dir)
            if isdir(join(plugins_dir, entry))]
# Official plugin names discovered from the checked-out source tree; used as
# the default set installed during the smoke test.
DEFAULT_PLUGINS = find_official_plugins()

# Fail fast if JAVA_HOME is not set — every java invocation below needs it.
try:
    JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
    raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")

# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
def run(command, env_vars=None):
    """Echo and execute a shell command, raising on non-zero exit.

    Args:
        command: the shell command line to run.
        env_vars: optional dict of environment variables exported to child
            processes before running.
    Raises:
        RuntimeError: if the command exits with a non-zero status.
    """
    if env_vars:
        for name, value in env_vars.items():
            os.putenv(name, value)
    print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END))
    status = os.system(command)
    if status:
        raise RuntimeError(' FAILED: %s' % (command))
def java_exe():
    """Return a shell prefix that selects the JAVA_HOME JVM for subcommands."""
    home = JAVA_HOME
    return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (home, home, home)
def verify_java_version(version):
    """Fail unless `java -version` reports the given major version."""
    output = os.popen('%s; java -version 2>&1' % java_exe()).read()
    if ' version "%s.' % version not in output:
        raise RuntimeError('got wrong version for java %s:\n%s' % (version, output))
def sha1(file):
    """Return the hex SHA-1 digest of the file at path *file*."""
    digest = hashlib.sha1()
    with open(file, 'rb') as handle:
        digest.update(handle.read())
    return digest.hexdigest()
def read_fully(file):
    """Read and return the entire contents of *file* decoded as UTF-8."""
    with open(file, encoding='utf-8') as handle:
        contents = handle.read()
    return contents
def wait_for_node_startup(es_dir, timeout=60, header=None):
    """Poll the node's HTTP port until it answers or the timeout elapses.

    Args:
        es_dir: elasticsearch install directory; its logs/http.ports file is
            read to discover the bound host:port.
        timeout: maximum number of one-second-spaced attempts to make.
        header: optional dict of extra HTTP headers (e.g. authorization).
    Returns:
        True if the node answered HTTP 200 within the timeout, else False.
    """
    # Bug fix: 'header' previously defaulted to a shared mutable dict ({});
    # use the None sentinel so the default cannot be mutated across calls.
    if header is None:
        header = {}
    print(' Waiting until node becomes available for at most %s seconds' % timeout)
    for _ in range(timeout):
        conn = None
        try:
            time.sleep(1)
            host = get_host_from_ports_file(es_dir)
            conn = HTTPConnection(host, timeout=1)
            conn.request('GET', '/', headers=header)
            res = conn.getresponse()
            if res.status == 200:
                return True
        except IOError:
            # that is ok it might not be there yet (ports file missing or
            # connection refused)
            pass
        finally:
            if conn:
                conn.close()
    return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
    """Download the release artifacts, verify sha1 + GPG, then smoke test them.

    Args:
        version: release version string (e.g. '2.0.0-beta1').
        hash: release build hash, forwarded to smoke_test_release.
        files: artifact paths relative to base_url.
        base_url: staging repository URL to download from.
        plugins: plugin names to install during the smoke test.
    Raises:
        RuntimeError: if a checksum mismatches or any shell step fails.
    """
    print('Downloading and verifying release %s from %s' % (version, base_url))
    tmp_dir = tempfile.mkdtemp()
    try:
        downloaded_files = []
        print(' ' + '*' * 80)
        # here we create a temp gpg home where we download the release key as the only key into
        # when we verify the signature it will fail if the signed key is not in the keystore and that
        # way we keep the executing host unmodified since we don't have to import the key into the default keystore
        gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
        os.makedirs(gpg_home_dir, 0o700)
        run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
        for file in files:
            name = os.path.basename(file)
            print(' Smoketest file: %s' % name)
            url = '%s/%s' % (base_url, file)
            print(' Downloading %s' % (url))
            artifact_path = os.path.join(tmp_dir, file)
            downloaded_files.append(artifact_path)
            current_artifact_dir = os.path.dirname(artifact_path)
            urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
            # Fetch and check the published sha1 next to the artifact.
            sha1_url = ''.join([url, '.sha1'])
            checksum_file = artifact_path + ".sha1"
            print(' Downloading %s' % (sha1_url))
            urllib.request.urlretrieve(sha1_url, checksum_file)
            print(' Verifying checksum %s' % (checksum_file))
            expected = read_fully(checksum_file)
            actual = sha1(artifact_path)
            if expected != actual:
                raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
            # Fetch the detached signature and verify it in the throwaway
            # GPG home created above.
            gpg_url = ''.join([url, '.asc'])
            gpg_file = artifact_path + ".asc"
            print(' Downloading %s' % (gpg_url))
            urllib.request.urlretrieve(gpg_url, gpg_file)
            print(' Verifying gpg signature %s' % (gpg_file))
            run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
            print(' ' + '*' * 80)
            print()
        # All artifacts verified: run the functional smoke test on them.
        smoke_test_release(version, downloaded_files, hash, plugins)
        print(' SUCCESS')
    finally:
        # Always clean up downloads and the temporary GPG home.
        shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
    """Return the first host:port entry the node wrote to logs/http.ports."""
    ports_file = os.path.join(es_dir, 'logs/http.ports')
    return read_fully(ports_file).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
    """Unpack each tar.gz/zip artifact, install the given plugins, start the
    node, and check via the HTTP API that the version and installed plugins
    match expectations.  Raises RuntimeError on any mismatch.

    NOTE(review): `hash` shadows the builtin; kept for interface compatibility.
    """
    for release_file in files:
        if not os.path.isfile(release_file):
            raise RuntimeError('Smoketest failed missing file %s' % (release_file))
        tmp_dir = tempfile.mkdtemp()
        if release_file.endswith('tar.gz'):
            run('tar -xzf %s -C %s' % (release_file, tmp_dir))
        elif release_file.endswith('zip'):
            run('unzip %s -d %s' % (release_file, tmp_dir))
        else:
            # deb/rpm packages are not smoke tested here
            print(' Skip SmokeTest for [%s]' % release_file)
            continue # nothing to do here
        es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
        es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
        print(' Smoke testing package [%s]' % release_file)
        es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
        plugin_names = {}
        for plugin in plugins:
            print(' Install plugin [%s]' % (plugin))
            # ES_JAVA_OPTS points the plugin installer at the staging repo for this hash
            run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
            plugin_names[plugin] = True
        if 'x-pack' in plugin_names:
            # x-pack enables authentication; create a throwaway superuser and
            # send basic-auth credentials with every request below.
            headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
            es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
            print(" Install dummy shield user")
            run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
        else:
            headers = {}
        print(' Starting elasticsearch deamon from [%s]' % es_dir)
        try:
            run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Enode.pidfile=%s -Enode.portsfile=true'
                % (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
            if not wait_for_node_startup(es_dir, header=headers):
                # Dump the server log to aid debugging before failing.
                print("elasticsearch logs:")
                print('*' * 80)
                logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
                print(logs)
                print('*' * 80)
                raise RuntimeError('server didn\'t start up')
            try: # we now get / and /_nodes to fetch basic infos like hashes etc and the installed plugins
                host = get_host_from_ports_file(es_dir)
                conn = HTTPConnection(host, timeout=20)
                conn.request('GET', '/', headers=headers)
                res = conn.getresponse()
                if res.status == 200:
                    version = json.loads(res.read().decode("utf-8"))['version']
                    if release != version['number']:
                        raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
                    if version['build_snapshot']:
                        raise RuntimeError('Expected non snapshot version')
                    print(' Verify if plugins are listed in _nodes')
                    conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers)
                    res = conn.getresponse()
                    if res.status == 200:
                        nodes = json.loads(res.read().decode("utf-8"))['nodes']
                        for _, node in nodes.items():
                            node_plugins = node['plugins']
                            for node_plugin in node_plugins:
                                # every reported plugin must have been installed above
                                if not plugin_names.get(node_plugin['name'].strip(), False):
                                    raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
                                del plugin_names[node_plugin['name']]
                        if plugin_names:
                            # anything left over was installed but not reported
                            raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
                    else:
                        raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
                else:
                    raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
            finally:
                conn.close()
        finally:
            pid_path = os.path.join(es_dir, 'es-smoke.pid')
            if os.path.exists(pid_path): # try reading the pid and kill the node
                pid = int(read_fully(pid_path))
                os.kill(pid, signal.SIGKILL)
            shutil.rmtree(tmp_dir)
        print(' ' + '*' * 80)
        print()
def parse_list(string):
    """Split a comma-separated string into a list of stripped tokens."""
    return [token.strip() for token in string.split(',')]
if __name__ == "__main__":
    # CLI entry point: smoke test a staged release candidate.
    parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
    parser.add_argument('--version', '-v', dest='version', default=None,
                        help='The Elasticsearch Version to smoke-tests', required=True)
    parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
                        help='The hash of the unified release')
    parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
                        help='A list of additional plugins to smoketest')
    parser.add_argument('--fetch_url', '-u', dest='url', default=None,
                        help='Fetched from the specified URL')
    # NOTE(review): these set_defaults calls repeat the defaults already given
    # in add_argument above (and hash/version are required anyway).
    parser.set_defaults(hash=None)
    parser.set_defaults(plugins=[])
    parser.set_defaults(version=None)
    parser.set_defaults(url=None)
    args = parser.parse_args()
    plugins = args.plugins
    version = args.version
    hash = args.hash
    url = args.url
    # Expand the artifact filename templates for this version.
    files = [ x % {'version': version} for x in [
        'elasticsearch-%(version)s.tar.gz',
        'elasticsearch-%(version)s.zip',
        'elasticsearch-%(version)s.deb',
        'elasticsearch-%(version)s.rpm'
    ]]
    verify_java_version('1.8')
    # An explicit --fetch_url overrides the default staging location.
    if url:
        download_url = url
    else:
        download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
    download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertFieldsToBraces.py | 384 | 2324 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from LoginTest import *
database="test"
uid = 3
class ConvertFieldsToBraces( unohelper.Base, XJobExecutor ):
    """UNO job that replaces every drop-down text field in the current
    OpenOffice document with its literal value (Items[1]).

    NOTE: Python 2 only (the surrounding module uses the `<>` operator).
    """
    def __init__(self, ctx):
        # UNO component context handed in by the office runtime.
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        LoginTest()
        # `loginstatus` is provided by the wildcard import of LoginTest.
        if not loginstatus and __name__=="package":
            exit(1)
        self.aReportSyntex=[]
        self.getFields()
    def getFields(self):
        # Walk all text fields of the current document and inline drop-downs.
        desktop=getDesktop()
        doc = desktop.getCurrentComponent()
        oParEnum = doc.getTextFields().createEnumeration()
        while oParEnum.hasMoreElements():
            oPar = oParEnum.nextElement()
            if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
                # Insert the field's stored value at its anchor, then drop the field.
                oPar.getAnchor().Text.insertString(oPar.getAnchor(),oPar.Items[1],False)
                oPar.dispose()
if __name__<>"package":
ConvertFieldsToBraces(None)
else:
g_ImplementationHelper.addImplementation( ConvertFieldsToBraces, "org.openoffice.openerp.report.convertFB", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dilawar/moose-chemical | utils/test_expr.py | 3 | 3774 | """test_expr.py:
Handle tests.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import re
import numpy as np
import logging
import moose.utils as mu
logger_ = logging.getLogger('util.testexpr')
class LTL():
    """A single LTL-style assertion over a recorded table.

    `expr` has three ';'-separated segments: "field BINOP value ; op ; start stop",
    e.g. "conc GT 1e-3; GT; 0 10".  After parsing, self.test_func(x, y) is a
    callable that returns False on SUCCESS (the assertion holds for sample x
    against reference y) and True on failure — see lambda_expr().
    """

    def __init__(self, name, expr):
        """
        assertion;
        op : operator
        interval: in the given interval
        """
        self.name = name
        self.expr = expr
        # "assertion ; op ; interval" — exactly three ';'-separated fields.
        self.assertion, self.op, self.interval = expr.split(';')
        self.start = None
        self.stop = None
        self.interval = self.interval.split()
        self.field = None
        self.binop = None
        self.value = None
        # Relative tolerance used by the 'AE' (approximately-equal) operator.
        self.error = 0.01
        self.le = None  # source text of the generated lambda expression
        self.test_func = None
        self.keywords = ['EQ', 'AE', 'GT', 'LT', 'GE', 'LE',
                         'EH',      # eventually happen
                         'HA',      # Happen after
                         'AH',      # always happens
                         'NH',      # Never happens
                         r'H\d+',   # happens N times.
                         ]
        self.parse()

    def getField(self):
        """Split the assertion "field BINOP value" into its components."""
        fs = self.assertion.split()
        self.field = fs[0].strip()
        self.binop = fs[1].strip()
        self.value = float(fs[2].strip())

    def getInterval(self):
        """Parse the "start stop" time interval into floats."""
        self.start = float(self.interval[0].strip())
        self.stop = float(self.interval[1].strip())

    def lambda_expr(self):
        """Compile self.test_func from the binary operator.

        NOTICE: This function returns False on sucess.
        Always return False on success
        """
        self.le = 'lambda x, y : '
        if self.binop == 'AE':
            # BUGFIX: use abs() for the relative error so that both x >> y and
            # x << y fail; the previous '(x - y)/x < err' accepted any y > x
            # because the signed difference was always below the tolerance.
            self.le += 'not abs(x - y)/abs(x) < %s' % self.error
        elif self.binop == 'EQ':
            self.le += 'not x == y '
        elif self.binop == 'NE':
            self.le += 'not x != y '
        elif self.binop == 'LT':
            self.le += 'not x < y'
        elif self.binop == 'LE':
            self.le += 'not x <= y'
        elif self.binop == 'GT':
            self.le += 'not x > y'
        elif self.binop == 'GE':
            self.le += 'not x >= y'
        else:
            # BUGFIX: `warnings` was never imported at module level, so this
            # branch raised NameError; import locally before warning.
            import warnings
            warnings.warn('BINOP %s not supported yet' % self.binop)
            # Unsupported operators degrade to an always-passing test.
            self.le += 'False'
        self.test_func = eval(self.le)

    def parse(self):
        """Run the full parse pipeline and compile the test function."""
        self.getField()
        self.getInterval()
        self.lambda_expr()
def execute_tests(time, node, molecule):
    """Run every LTL assertion attached to *node* against *molecule*."""
    logger_.info("Running test on molecule: %s" % molecule)
    for ltl in node['ltls']:
        assert_ltl(ltl, node, molecule, time)
def assert_ltl(ltl, node, molecule, time):
    """Evaluate one LTL assertion on the recorded table of `ltl.field`.

    Prints "Passed" when no sample in the interval violates the test,
    otherwise prints and saves a (time, value) witness matrix to
    "<molecule>_<ltl.name>.witness".
    """
    logger_.debug("Running a LTL (%s) on given node" % ltl.name)
    field = ltl.field
    vec = node['%s_table' % field].vector
    N = len(vec)
    # Sampling step inferred from total simulated time and vector length.
    # NOTE(review): raises ZeroDivisionError if the table is empty — confirm
    # callers only pass recorded tables.
    dt = time / N
    # Convert the assertion's time interval to sample indices.
    startN, stopN = int(ltl.start/dt), int(ltl.stop/dt)
    data = vec[startN:stopN]
    if len(data) == 0:
        mu.warn([ "Ignoring test"
            , "Probably simulation time is not enough" ]
            )
        return None
    # test_func returns True for a VIOLATING sample (False means success).
    func = np.vectorize(ltl.test_func)
    res = func(data, ltl.value)
    # Indices (in the full vector) of all violating samples.
    witness = startN + np.flatnonzero(res)
    time_witness = witness * dt
    value_witness = np.take(vec, witness)
    if len(witness) == 0: print("\t Passed")
    else:
        outfile = "%s_%s.witness" % (molecule, ltl.name)
        witness_mat = np.vstack([time_witness, value_witness]).T
        print("\tFailed. Witness is printed below (time, value)")
        print(witness_mat)
        print("NOTICE: These witness are also saved to file: %s" % outfile)
        np.savetxt(outfile, witness_mat, delimiter=',')
nlu90/heron | heron/tools/tracker/src/python/handlers/topologyconfighandler.py | 5 | 2065 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' topologyhandler.py '''
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python.handlers import BaseHandler
class TopologyConfigHandler(BaseHandler):
    """
    url - /topologies/config
    Parameters:
     - cluster (required)
     - role - (optional) Role used to submit the topology.
     - environ (required)
     - topology (required) name of the requested topology

    the response json is a dictionary with all the
    configuration for the topology.
    """

    # pylint: disable=attribute-defined-outside-init
    def initialize(self, tracker):
        """ initialize """
        # Tornado passes the route's keyword arguments here; keep a reference
        # to the shared tracker used to look up topology state.
        self.tracker = tracker

    @tornado.gen.coroutine
    def get(self):
        """ get method """
        try:
            cluster = self.get_argument_cluster()
            role = self.get_argument_role()
            environ = self.get_argument_environ()
            topology_name = self.get_argument_topology()
            topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
            # The topology's config lives inside its physical plan.
            config = topology_info["physical_plan"]["config"]
            self.write_success_response(config)
        except Exception as e:
            # Log the traceback at debug level and return a structured error
            # payload instead of letting tornado render a 500 page.
            Log.debug(traceback.format_exc())
            self.write_error_response(e)
| apache-2.0 |
bq/bitbloq-offline | app/res/web2board/win32/res/Scons/sconsFiles/SCons/Debug.py | 6 | 7259 | """SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import os
import sys
import time
import weakref
import inspect
# Global variable that gets set to 'True' by the Main script,
# when the creation of class instances should get tracked.
track_instances = False
# List of currently tracked classes
tracked_classes = {}
def logInstanceCreation(instance, name=None):
    """Record creation of *instance* under *name* (defaults to its class
    name) in the module-level tracked_classes registry."""
    if name is None:
        name = instance.__class__.__name__
    refs = tracked_classes.setdefault(name, [])
    if hasattr(instance, '__dict__'):
        refs.append(weakref.ref(instance))
    else:
        # weakref doesn't seem to work when the instance
        # contains only slots...
        refs.append(instance)
def string_to_classes(s):
    """Expand a class-name spec: '*' means every tracked class (sorted),
    anything else is split on whitespace."""
    if s != '*':
        return s.split()
    return sorted(tracked_classes.keys())
def fetchLoggedInstances(classes="*"):
    """Return a list of (classname, tracked-count) pairs for the selection."""
    return [(name, len(tracked_classes[name]))
            for name in string_to_classes(classes)]
def countLoggedInstances(classes, file=sys.stdout):
    """Write one 'name: count' line per selected class to *file*."""
    for name in string_to_classes(classes):
        count = len(tracked_classes[name])
        file.write("%s: %d\n" % (name, count))
def listLoggedInstances(classes, file=sys.stdout):
    """Write the repr of every live tracked instance of the selected
    classes to *file*."""
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            # Entries are either weakrefs (dereference by calling) or, for
            # __slots__-only objects, the instances themselves.
            obj = ref() if inspect.isclass(ref) else ref
            if obj is not None:
                file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
    """Write each live tracked instance and its __dict__ contents to *file*."""
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            obj = ref()
            if obj is None:
                continue
            file.write(' %s:\n' % obj)
            for key, value in obj.__dict__.items():
                file.write(' %20s : %s\n' % (key, value))
# Define a platform-appropriate memory() helper returning a single integer
# measure of the process's memory usage (0 where unsupported).
if sys.platform[:5] == "linux":
    # Linux doesn't actually support memory usage stats from getrusage().
    def memory():
        # Field 23 of /proc/self/stat (index 22) — vsize per proc(5).
        mstr = open('/proc/self/stat').read()
        mstr = mstr.split()[22]
        return int(mstr)
elif sys.platform[:6] == 'darwin':
    #TODO really get memory stats for OS X
    def memory():
        return 0
else:
    try:
        import resource
    except ImportError:
        # No resource module: probably Windows — try the pywin32 APIs.
        try:
            import win32process
            import win32api
        except ImportError:
            # No way to measure; report zero.
            def memory():
                return 0
        else:
            def memory():
                process_handle = win32api.GetCurrentProcess()
                memory_info = win32process.GetProcessMemoryInfo( process_handle )
                return memory_info['PeakWorkingSetSize']
    else:
        def memory():
            # Index 4 of the getrusage result tuple.
            res = resource.getrusage(resource.RUSAGE_SELF)
            return res[4]
def caller_stack():
    """Return the current call stack (excluding this function and its direct
    caller) formatted as 'file:line(function)' strings."""
    import traceback
    # strip itself and the caller from the output
    frames = traceback.extract_stack()[:-2]
    # each frame is (filename, line number, function name, text)
    return ['%s:%d(%s)' % func_shorten(frame[:3]) for frame in frames]
# Accumulators for caller_trace(): total call counts per traced function,
# and per-function dictionaries of caller-chain counts.
caller_bases = {}
caller_dicts = {}

# trace a caller's stack
def caller_trace(back=0):
    """Record who called the instrumented function (and the chain above it).

    `back` extends how many extra frames of the chain are captured.
    Results are printed later by dump_caller_counts().
    """
    import traceback
    tb = traceback.extract_stack(limit=3+back)
    tb.reverse()
    # tb[0] is this function itself; tb[1] is the instrumented callee.
    callee = tb[1][:3]
    caller_bases[callee] = caller_bases.get(callee, 0) + 1
    for caller in tb[2:]:
        # Keys grow by concatenating the chain so each level is unique.
        caller = callee + caller[:3]
        try:
            entry = caller_dicts[callee]
        except KeyError:
            caller_dicts[callee] = entry = {}
        entry[caller] = entry.get(caller, 0) + 1
        callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
    """Recursively print the callers recorded for `key`, most frequent
    first, indenting one step per recursion level."""
    leader = ' '*level
    # Negate counts so the default ascending sort yields descending counts.
    for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
        file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
        if c in caller_dicts:
            _dump_one_caller(c, file, level+1)
def dump_caller_counts(file=sys.stdout):
    """Print every call tree accumulated by caller_trace()."""
    for key in sorted(caller_bases.keys()):
        header = "Callers of %s:%d(%s), %d calls:\n" % (func_shorten(key) + (caller_bases[key],))
        file.write(header)
        _dump_one_caller(key, file)
# (prefix, strip_prefix_flag): flag 1 means drop the prefix itself as well,
# flag 0 means keep the filename from the match position onward.
shorten_list = [
    ( '/scons/SCons/', 1),
    ( '/src/engine/SCons/', 1),
    ( '/usr/lib/python', 0),
]

if os.sep != '/':
    shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]

def func_shorten(func_tuple):
    """Shorten the filename element of a (file, line, func) tuple by
    stripping well-known path prefixes; unmatched tuples pass through."""
    filename = func_tuple[0]
    for prefix, strip_prefix in shorten_list:
        pos = filename.find(prefix)
        if pos < 0:
            continue
        if strip_prefix:
            pos = pos + len(prefix)
        return (filename[pos:],) + func_tuple[1:]
    return func_tuple
# Cache of open trace file objects, keyed by the `file` argument.
TraceFP = {}
# Default trace destination: the console.
if sys.platform == 'win32':
    TraceDefault = 'con'
else:
    TraceDefault = '/dev/tty'
# Default for the tstamp flag; a call with tstamp set also updates this.
TimeStampDefault = None
# Baselines for the two timestamp columns (since start / since last call).
StartTime = time.time()
PreviousTime = StartTime

def Trace(msg, file=None, mode='w', tstamp=None):
    """Write a trace message to a file. Whenever a file is specified,
    it becomes the default for the next call to Trace()."""
    global TraceDefault
    global TimeStampDefault
    global PreviousTime
    if file is None:
        file = TraceDefault
    else:
        TraceDefault = file
    if tstamp is None:
        tstamp = TimeStampDefault
    else:
        TimeStampDefault = tstamp
    try:
        # Reuse an already-opened file object for this destination.
        fp = TraceFP[file]
    except KeyError:
        try:
            fp = TraceFP[file] = open(file, mode)
        except TypeError:
            # Assume we were passed an open file pointer.
            fp = file
    if tstamp:
        now = time.time()
        # Columns: seconds since start, seconds since the previous trace.
        fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
        PreviousTime = now
    fp.write(msg)
    fp.flush()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
mscherer/rpmlint | I18NCheck.py | 2 | 6013 | # -*- coding: utf-8 -*-
#############################################################################
# File : I18NCheck.py
# Package : rpmlint
# Author : Frederic Lepied
# Created on : Mon Nov 22 20:02:56 1999
# Purpose : checks i18n bugs.
#############################################################################
import re
import rpm
from Filter import addDetails, printError, printWarning
from __isocodes__ import COUNTRIES, LANGUAGES
import AbstractCheck
# Associative array of invalid value => correct value
INCORRECT_LOCALES = {
'in': 'id',
'in_ID': 'id_ID',
'iw': 'he',
'iw_IL': 'he_IL',
'gr': 'el',
'gr_GR': 'el_GR',
'cz': 'cs',
'cz_CZ': 'cs_CZ',
'lug': 'lg', # 'lug' is valid, but we standardize on 2 letter codes
'en_UK': 'en_GB'}
package_regex = re.compile('-(' + '|'.join(LANGUAGES) + ')$')
locale_regex = re.compile('^(/usr/share/locale/([^/]+))/')
correct_subdir_regex = re.compile('^(([a-z][a-z]([a-z])?(_[A-Z][A-Z])?)([.@].*$)?)$')
lc_messages_regex = re.compile('/usr/share/locale/([^/]+)/LC_MESSAGES/.*(mo|po)$')
man_regex = re.compile('/usr(?:/share)?/man/([^/]+)/man[0-9n][^/]*/[^/]+$')
# list of exceptions
#
# note: ISO-8859-9E is non standard, ISO-8859-{6,8} are of limited use
# as locales (since all modern handling of bidi is based on utf-8 anyway),
# so they should be removed once UTF-8 is deployed)
EXCEPTION_DIRS = (
'C', 'POSIX', 'CP1251', 'CP1255', 'CP1256',
'ISO-8859-1', 'ISO-8859-2', 'ISO-8859-3', 'ISO-8859-4', 'ISO-8859-5',
'ISO-8859-6', 'ISO-8859-7', 'ISO-8859-8', 'ISO-8859-9', 'ISO-8859-9E',
'ISO-8859-10', 'ISO-8859-13', 'ISO-8859-14', 'ISO-8859-15',
'KOI8-R', 'KOI8-U', 'UTF-8', 'default')
def is_valid_lang(lang):
    """Return whether *lang* (e.g. 'en', 'pt_BR', optionally with an
    '@variant' or '.charset' suffix) is a recognized language, or a
    recognized language/country pair."""
    # TODO: @Foo and charset handling
    lang = re.sub("[@.].*$", "", lang)
    if lang in LANGUAGES:
        return True
    try:
        base, country = lang.split("_", 1)
    except ValueError:
        # no underscore and not a plain known language
        return False
    # TODO: don't accept all lang_COUNTRY combinations
    return base in LANGUAGES and country in COUNTRIES
class I18NCheck(AbstractCheck.AbstractCheck):
    """rpmlint check for internationalization problems: bad locale
    subdirectories, incorrect locale codes, and files missing %lang tags."""

    def __init__(self):
        AbstractCheck.AbstractCheck.__init__(self, 'I18NCheck')

    def check_binary(self, pkg):
        """Run all i18n checks against a binary package."""
        files = list(pkg.files().keys())
        files.sort()
        locales = [] # list of locales for this packages
        webapp = False
        i18n_tags = pkg[rpm.RPMTAG_HEADERI18NTABLE] or ()
        # Flag known-wrong locale codes used in the package header.
        for i in i18n_tags:
            try:
                correct = INCORRECT_LOCALES[i]
                printError(pkg, 'incorrect-i18n-tag-' + correct, i)
            except KeyError:
                pass
        # as some webapps have their files under /var/www/html, and
        # others in /usr/share or /usr/lib, the only reliable way
        # sofar to detect them is to look for an apache configuration file
        for f in files:
            if f.startswith('/etc/apache2/') or \
                    f.startswith('/etc/httpd/conf.d/'):
                webapp = True
        for f in files:
            res = locale_regex.search(f)
            if res:
                locale = res.group(2)
                # checks the same locale only once
                if locale not in locales:
                    locales.append(locale)
                    res2 = correct_subdir_regex.search(locale)
                    if not res2:
                        if locale not in EXCEPTION_DIRS:
                            printError(pkg, 'incorrect-locale-subdir', f)
                    else:
                        locale_name = res2.group(2)
                        try:
                            correct = INCORRECT_LOCALES[locale_name]
                            printError(pkg, 'incorrect-locale-' + correct, f)
                        except KeyError:
                            pass
            # Validate the language component of LC_MESSAGES and man paths.
            res = lc_messages_regex.search(f)
            subdir = None
            if res:
                subdir = res.group(1)
                if not is_valid_lang(subdir):
                    printError(pkg, 'invalid-lc-messages-dir', f)
            else:
                res = man_regex.search(f)
                if res:
                    subdir = res.group(1)
                    if is_valid_lang(subdir):
                        # valid man language dir: no %lang warning needed below
                        subdir = None
                    else:
                        printError(pkg, 'invalid-locale-man-dir', f)
            # Locale-specific files should carry a %lang tag (webapps exempt).
            if f.endswith('.mo') or subdir:
                if pkg.files()[f].lang == '' and not webapp:
                    printWarning(pkg, 'file-not-in-%lang', f)
        # Files below a %lang-tagged directory should be tagged too.
        main_dir, main_lang = ("", "")
        for f in files:
            lang = pkg.files()[f].lang
            if main_lang and lang == "" and is_prefix(main_dir + '/', f):
                printError(pkg, 'subfile-not-in-%lang', f)
            if main_lang != lang:
                main_dir, main_lang = f, lang
        # A package named like a language should depend on its locales package.
        name = pkg.name
        res = package_regex.search(name)
        if res:
            locales = 'locales-' + res.group(1)
            if locales != name:
                if locales not in (x[0] for x in pkg.requires()):
                    printError(pkg, 'no-dependency-on', locales)
def is_prefix(p, s):
    """Return True if sequence `p` is a prefix of sequence `s`.

    Works for strings and generic sequences alike.  The explicit length
    comparison of the original was redundant: when len(p) > len(s) the
    slice s[:len(p)] equals s, which cannot equal the longer p.
    """
    return s[:len(p)] == p
# Create an object to enable the auto registration of the test
check = I18NCheck()
addDetails(
# Need to add a function to list all the locales
'incorrect-i18n-tag-',
"""
""",
'incorrect-locale-subdir',
"""
""",
'incorrect-locale-',
"""
""",
'invalid-lc-messages-dir',
"""
""",
'invalid-locale-man-dir',
"""
""",
'file-not-in-lang',
"""
""",
'no-dependency-on',
"""
""",
'subfile-not-in-%lang',
""" If /foo/bar is not tagged %lang(XX) whereas /foo is, the package won't be
installable if XX is not in %_install_langs.""",
)
# I18NCheck.py ends here
# Local variables:
# indent-tabs-mode: nil
# py-indent-offset: 4
# End:
# ex: ts=4 sw=4 et
| gpl-2.0 |
40423233/2017springcd_hw | plugin/liquid_tags/b64img.py | 312 | 3085 | """
Image Tag
---------
This implements a Liquid-style image tag for Pelican,
based on the liquid img tag which is based on the octopress image tag [1]_
Syntax
------
{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}
Examples
--------
{% b64img /images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png 150 150 "Ninja Attack!" "Ninja in attack posture" %}
Output
------
<img src="data:;base64,....">
<img class="left half" src="data:;base64,..." title="Ninja Attack!" alt="Ninja Attack!">
<img class="left half" src="data:;base64,..." width="150" height="150" title="Ninja Attack!" alt="Ninja in attack posture">
[1] https://github.com/imathis/octopress/blob/master/plugins/image_tag.rb
"""
import re
import base64
import urllib2
from .mdx_liquid_tags import LiquidTags
import six
SYNTAX = '{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}'
# Regular expression to match the entire syntax
ReImg = re.compile("""(?P<class>\S.*\s+)?(?P<src>(?:https?:\/\/|\/|\S+\/)\S+)(?:\s+(?P<width>\d+))?(?:\s+(?P<height>\d+))?(?P<title>\s+.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e))
def base64image(src):
    """ Generate a base64 encoded image from the source file. """
    content = _get_file(src)
    return base64.b64encode(content)
@LiquidTags.register('b64img')
def b64img(preprocessor, tag, markup):
    """Liquid tag handler: parse the b64img markup and return an <img> tag
    whose src is a base64 data URI of the referenced image."""
    attrs = None

    # Parse the markup string
    match = ReImg.search(markup)
    if match:
        # Keep only the named groups that actually matched, stripped.
        attrs = dict([(key, val.strip())
                      for (key, val) in six.iteritems(match.groupdict()) if val])
    else:
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))

    # Check if alt text is present -- if so, split it from title
    if 'title' in attrs:
        match = ReTitleAlt.search(attrs['title'])
        if match:
            attrs.update(match.groupdict())
        # Fall back to the title when no explicit alt text was given.
        if not attrs.get('alt'):
            attrs['alt'] = attrs['title']

    # Replace the src path with an inline base64 data URI.
    attrs['src'] = 'data:;base64,{}'.format(base64image(attrs['src']))

    # Return the formatted text
    return "<img {0}>".format(' '.join('{0}="{1}"'.format(key, val)
                                       for (key, val) in six.iteritems(attrs)))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
skuarch/namebench | nb_third_party/dns/rdtypes/ANY/HIP.py | 248 | 4933 | # Copyright (C) 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import string
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
class HIP(dns.rdata.Rdata):
    """HIP record

    Python 2 implementation (cStringIO, cmp(), str hex/base64 codecs).

    @ivar hit: the host identity tag
    @type hit: string
    @ivar algorithm: the public key cryptographic algorithm
    @type algorithm: int
    @ivar key: the public key
    @type key: string
    @ivar servers: the rendezvous servers
    @type servers: list of dns.name.Name objects
    @see: RFC 5205"""

    __slots__ = ['hit', 'algorithm', 'key', 'servers']

    def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
        super(HIP, self).__init__(rdclass, rdtype)
        self.hit = hit
        self.algorithm = algorithm
        self.key = key
        self.servers = servers

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: "<algorithm> <hex HIT> <base64 key> [servers...]"
        hit = self.hit.encode('hex-codec')
        key = self.key.encode('base64-codec').replace('\n', '')
        text = ''
        servers = []
        for server in self.servers:
            servers.append(str(server.choose_relativity(origin, relativize)))
        if len(servers) > 0:
            text += (' ' + ' '.join(servers))
        return '%u %s %s%s' % (self.algorithm, hit, key, text)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse a HIP record from its presentation format."""
        algorithm = tok.get_uint8()
        hit = tok.get_string().decode('hex-codec')
        # The wire format stores the HIT length in a single octet.
        if len(hit) > 255:
            raise dns.exception.SyntaxError("HIT too long")
        key = tok.get_string().decode('base64-codec')
        servers = []
        # Any remaining tokens are rendezvous server names.
        while 1:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            server = dns.name.from_text(token.value, origin)
            server.choose_relativity(origin, relativize)
            servers.append(server)
        return cls(rdclass, rdtype, hit, algorithm, key, servers)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Header: HIT length (1 byte), algorithm (1 byte), key length (2 bytes),
        # followed by the raw HIT, the raw key, then the server names
        # (written uncompressed — compress is deliberately not passed on).
        lh = len(self.hit)
        lk = len(self.key)
        file.write(struct.pack("!BBH", lh, self.algorithm, lk))
        file.write(self.hit)
        file.write(self.key)
        for server in self.servers:
            server.to_wire(file, None, origin)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Parse a HIP record from wire format (see to_wire for the layout)."""
        (lh, algorithm, lk) = struct.unpack('!BBH',
                                            wire[current : current + 4])
        current += 4
        rdlen -= 4
        hit = wire[current : current + lh]
        current += lh
        rdlen -= lh
        key = wire[current : current + lk]
        current += lk
        rdlen -= lk
        servers = []
        # Remaining rdata bytes hold the rendezvous server names.
        while rdlen > 0:
            (server, cused) = dns.name.from_wire(wire[: current + rdlen],
                                                 current)
            current += cused
            rdlen -= cused
            if not origin is None:
                server = server.relativize(origin)
            servers.append(server)
        return cls(rdclass, rdtype, hit, algorithm, key, servers)

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        # Rebuild the server list with adjusted relativity.
        servers = []
        for server in self.servers:
            server = server.choose_relativity(origin, relativize)
            servers.append(server)
        self.servers = servers

    def _cmp(self, other):
        # Compare the fixed-size portion as packed wire bytes first,
        # then the server name lists element-wise, then by list length.
        b1 = cStringIO.StringIO()
        lh = len(self.hit)
        lk = len(self.key)
        b1.write(struct.pack("!BBH", lh, self.algorithm, lk))
        b1.write(self.hit)
        b1.write(self.key)
        b2 = cStringIO.StringIO()
        lh = len(other.hit)
        lk = len(other.key)
        b2.write(struct.pack("!BBH", lh, other.algorithm, lk))
        b2.write(other.hit)
        b2.write(other.key)
        v = cmp(b1.getvalue(), b2.getvalue())
        if v != 0:
            return v
        ls = len(self.servers)
        lo = len(other.servers)
        count = min(ls, lo)
        i = 0
        while i < count:
            v = cmp(self.servers[i], other.servers[i])
            if v != 0:
                return v
            i += 1
        return ls - lo
| apache-2.0 |
simonluijk/django-localeurl | localeurl/utils.py | 4 | 2928 | from django.conf import settings
from django.core import urlresolvers
from localeurl import settings as localeurl_settings
def is_locale_independent(path):
    """
    Returns whether the path is locale-independent.
    """
    media_url = settings.MEDIA_URL
    if (localeurl_settings.LOCALE_INDEPENDENT_MEDIA_URL and media_url
            and path.startswith(media_url)):
        return True
    static_url = getattr(settings, "STATIC_URL", None)
    if (localeurl_settings.LOCALE_INDEPENDENT_STATIC_URL and static_url
            and path.startswith(static_url)):
        return True
    for regex in localeurl_settings.LOCALE_INDEPENDENT_PATHS:
        if regex.search(path):
            return True
    return False
def strip_path(path):
    """
    Separates the locale prefix from the rest of the path. If the path does not
    begin with a locale it is returned without change.
    """
    match = localeurl_settings.PATH_RE.match(path)
    if match:
        rest = match.group('path') or '/'
        if rest.startswith('/'):
            return match.group('locale'), rest
    return '', path
def supported_language(locale):
    """
    Returns the supported language (from settings.LANGUAGES) for the locale,
    trying the full locale first and then its two-letter language code.
    """
    locale = locale.lower()
    for candidate in (locale, locale[:2]):
        if candidate in localeurl_settings.SUPPORTED_LOCALES:
            return candidate
    return None
def is_default_locale(locale):
    """
    Returns whether the locale is the default locale.
    """
    default = supported_language(settings.LANGUAGE_CODE)
    return locale == default
def locale_path(path, locale=''):
    """
    Generate the localeurl-enabled path from a path without locale prefix. If
    the locale is empty settings.LANGUAGE_CODE is used.
    """
    locale = supported_language(locale) or supported_language(settings.LANGUAGE_CODE)
    if is_locale_independent(path):
        return path
    if is_default_locale(locale) and not localeurl_settings.PREFIX_DEFAULT_LOCALE:
        return path
    return ''.join([u'/', locale, path])
def locale_url(path, locale=''):
    """
    Generate the localeurl-enabled URL from a path without locale prefix.
    If the locale is empty settings.LANGUAGE_CODE is used.
    """
    return add_script_prefix(locale_path(path, locale))
def strip_script_prefix(url):
    """
    Strips the SCRIPT_PREFIX from the URL. Because this function is meant for
    use in templates, it assumes the URL starts with the prefix.

    Returns a (prefix, rest) tuple split just before the prefix's trailing
    slash, so the second element still begins with '/'.
    """
    prefix = urlresolvers.get_script_prefix()
    assert url.startswith(prefix), \
        "URL must start with SCRIPT_PREFIX: %s" % url
    split_at = len(prefix) - 1
    return url[:split_at], url[split_at:]
def add_script_prefix(path):
    """
    Prepends the SCRIPT_PREFIX to a path.
    """
    # The prefix ends with '/' and the path starts with '/'; drop the path's
    # leading slash so the two are not doubled.
    return urlresolvers.get_script_prefix() + path[1:]
| mit |
takis/django | tests/m2m_through_regress/models.py | 273 | 2771 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Forward declared intermediate model
@python_2_unicode_compatible
class Membership(models.Model):
    """Explicit 'through' model for the Person<->Group m2m relation."""
    person = models.ForeignKey('Person', models.CASCADE)
    group = models.ForeignKey('Group', models.CASCADE)
    # Extra payload on the relation beyond the two foreign keys.
    price = models.IntegerField(default=100)
    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)
# using custom id column to test ticket #11107
@python_2_unicode_compatible
class UserMembership(models.Model):
    """'Through' model for Group.user_members with a custom id column name."""
    id = models.AutoField(db_column='usermembership_id', primary_key=True)
    user = models.ForeignKey(User, models.CASCADE)
    group = models.ForeignKey('Group', models.CASCADE)
    price = models.IntegerField(default=100)
    def __str__(self):
        return "%s is a user and member of %s" % (self.user.username, self.group.name)
@python_2_unicode_compatible
class Person(models.Model):
    """Member side of the Group.members relation (via Membership)."""
    name = models.CharField(max_length=128)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    """Group with two m2m relations, each through an explicit model."""
    name = models.CharField(max_length=128)
    # Membership object defined as a class
    members = models.ManyToManyField(Person, through=Membership)
    user_members = models.ManyToManyField(User, through='UserMembership')
    def __str__(self):
        return self.name
# A set of models that use an non-abstract inherited model as the 'through' model.
class A(models.Model):
    """One side of the m2m that uses an inherited 'through' model."""
    a_text = models.CharField(max_length=20)
class ThroughBase(models.Model):
    """Non-abstract base holding the FKs inherited by Through."""
    a = models.ForeignKey(A, models.CASCADE)
    b = models.ForeignKey('B', models.CASCADE)
class Through(ThroughBase):
    """'Through' model that inherits its FKs from a concrete base."""
    extra = models.CharField(max_length=20)
class B(models.Model):
    """Other side of the m2m that uses the inherited Through model."""
    b_text = models.CharField(max_length=20)
    a_list = models.ManyToManyField(A, through=Through)
# Using to_field on the through model
@python_2_unicode_compatible
class Car(models.Model):
    """Car whose m2m 'through' model references it via to_field='make'."""
    make = models.CharField(max_length=20, unique=True, null=True)
    drivers = models.ManyToManyField('Driver', through='CarDriver')
    def __str__(self):
        return "%s" % self.make
@python_2_unicode_compatible
class Driver(models.Model):
    """Driver with a unique, nullable name used as a to_field target."""
    name = models.CharField(max_length=20, unique=True, null=True)
    def __str__(self):
        return "%s" % self.name
    class Meta:
        ordering = ('name',)
@python_2_unicode_compatible
class CarDriver(models.Model):
    """'Through' model whose FKs target non-pk columns via to_field."""
    car = models.ForeignKey('Car', models.CASCADE, to_field='make')
    driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')
    def __str__(self):
        return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
| bsd-3-clause |
andriibekker/biddingsbase | django/contrib/gis/db/backends/postgis/models.py | 403 | 1970 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from the PostGIS. See the PostGIS
    documentation at Ch. 4.2.2.
    """
    f_table_catalog = models.CharField(max_length=256)
    f_table_schema = models.CharField(max_length=256)
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    type = models.CharField(max_length=30)
    class Meta:
        # Maps onto the existing PostGIS metadata table; Django never
        # creates or migrates it.
        db_table = 'geometry_columns'
        managed = False
    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'
    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'
    def __unicode__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    srtext = models.CharField(max_length=2048)
    proj4text = models.CharField(max_length=2048)
    class Meta:
        # Maps onto the existing PostGIS metadata table; Django never
        # creates or migrates it.
        db_table = 'spatial_ref_sys'
        managed = False
    @property
    def wkt(self):
        """The well-known-text of the SRS, as stored in the srtext column."""
        return self.srtext
    @classmethod
    def wkt_col(cls):
        """Returns the name of the column holding the WKT of the SRS."""
        return 'srtext'
| bsd-3-clause |
voidpp/asd | asd/openvpn_manage.py | 1 | 5613 | import os
import logging
import json
import tarfile
import StringIO
from collections import OrderedDict
from voidpp_tools.cli_utils import confirm_prompt
from subprocess import check_call, CalledProcessError, PIPE, check_output
from .tool import ToolBase
from .openvpn_config_schema import schema
logger = logging.getLogger(__name__)
# Registry of CLI sub-commands, in declaration order.
cli_handlers = OrderedDict()
def reg_cli_handler(command_name, arguments = (), help = None):
    """Decorator that registers a method as a CLI sub-command.

    Args:
        command_name: name of the sub-command on the command line.
        arguments: iterable of dicts with 'name' (argparse name/flags list)
            and 'args' (keyword args for add_argument). The default is an
            immutable tuple instead of the original mutable list literal,
            avoiding the shared-mutable-default pitfall.
        help: help text shown by argparse for the sub-command.

    Returns:
        The decorator, which stores the handler and returns the function
        unchanged.
    """
    def decorator(func):
        cli_handlers[command_name] = dict(
            arguments = arguments,
            callback = func,
            help = help,
        )
        return func
    return decorator
class OpenVPNManager(ToolBase):
    """'asd openvpn' tool: manages OpenVPN client keys and config bundles."""
    name = 'openvpn'
    desc = 'Manage the openvpn clients'
    def register(self, parser):
        """Registers every @reg_cli_handler command as an argparse sub-command."""
        subparsers = parser.add_subparsers(dest = 'ovpn_command')
        for command_name, handler in cli_handlers.items():
            subparser = subparsers.add_parser(command_name, help = handler['help'])
            for argument in handler['arguments']:
                subparser.add_argument(*argument['name'], **argument['args'])
    @reg_cli_handler('add-client', [dict(
        name = ['name'],
        args = dict(help = "name of the client"),
    )], 'Generate client certificates and a tar file with the necessary files')
    def add_client(self, name):
        """Generates keys for a new client and packs them into a tar archive.

        Requires the easy-rsa 'vars' and 'pkitool' files in the current
        directory and a previously created openvpn-manager.json config.
        """
        missing_mandatory_files = self.__check_mandatory_files()
        if len(missing_mandatory_files):
            logger.error("Missing mandatory file(s): %s", ', '.join(missing_mandatory_files))
            return
        config = self.__load_config()
        if config is None:
            logger.error("Not found the config file. Please run 'asd openvpn create-config'!")
            return False
        user_file_name = '{}.{}'.format(name, config['server_name'])
        logger.debug("User filename: %s", user_file_name)
        key_name = '{}/{}.key'.format(config['keys_folder'], user_file_name)
        if os.path.exists(key_name):
            logger.error("User '%s' is already exists", name)
            return
        res = self.__generate_client_keys(name)
        if res is False:
            logger.error("Keys were not generated.")
            return
        # Filenames (as they will appear inside the tar) referenced by the
        # generated .ovpn file.
        client_config_data = dict(
            ca = '{}.crt'.format(config['server_name']),
            key = '{}.key'.format(user_file_name),
            cert = '{}.crt'.format(user_file_name),
        )
        filename = self.__create_tar_file(name, client_config_data, config)
        logger.info("Client config created: '%s'", filename)
    def __create_tar_file(self, name, client_config_data, config):
        """Builds <name>.<server>.tar with the ca/cert/key and the .ovpn file."""
        logger.debug("Create tar file with %s", client_config_data)
        client_ovpn_data = self.__get_client_ovpn_content(config['client_config'], client_config_data)
        ccd = client_config_data
        filename = "{}.{}.tar".format(name, config['server_name'])
        with tarfile.open(filename, 'w') as archive:
            # Second add() argument is the arcname inside the archive.
            archive.add('{}/ca.crt'.format(config['keys_folder']), ccd['ca'])
            archive.add('{}/{}.crt'.format(config['keys_folder'], name), ccd['cert'])
            archive.add('{}/{}.key'.format(config['keys_folder'], name), ccd['key'])
            client_config_stream = StringIO.StringIO()
            client_config_stream.write(client_ovpn_data)
            client_config_stream.seek(0)
            tarinfo = tarfile.TarInfo(name = '{}.ovpn'.format(config['server_name']))
            # NOTE(review): relies on py2 StringIO internals -- .buf is only
            # complete here because seek() flushes the write buffer into it;
            # len(client_ovpn_data) would be safer. Confirm before changing.
            tarinfo.size = len(client_config_stream.buf)
            archive.addfile(tarinfo, client_config_stream)
        return filename
    def __check_mandatory_files(self):
        """Returns the list of required easy-rsa files missing from the cwd."""
        missing_files = []
        for fn in ['vars', 'pkitool']:
            if not os.path.isfile(fn):
                missing_files.append(fn)
        return missing_files
    def __generate_client_keys(self, name):
        """Runs easy-rsa pkitool for |name|; returns True on success."""
        command = ['bash', '-c', 'source vars && ./pkitool {}'.format(name)]
        try:
            logger.info(check_output(command, stderr = PIPE))
            return True
        except CalledProcessError as e:
            logger.exception(e)
            return False
    def __load_config(self):
        """Loads openvpn-manager.json, or returns None when it doesn't exist."""
        if not os.path.isfile(self.config_filename):
            return None
        with open(self.config_filename) as f:
            # Preserve key order so the generated .ovpn line order is stable.
            return json.load(f, object_pairs_hook = OrderedDict)
    def __get_client_ovpn_content(self, config_schema, data):
        """Renders the .ovpn text from the schema, overriding values from |data|."""
        logger.debug("Generate .ovpn file with data: %s", data)
        lines = []
        for name, value in config_schema.items():
            if name in data:
                value = data[name]
            line = [name]
            if value is not None:
                line += [str(value)]
            lines.append(' '.join(line))
        return '\n'.join(lines)
    @reg_cli_handler('create-config', help = "Create default config. Need to edit the created file!")
    def create_config(self):
        """Writes the default openvpn-manager.json (asks before overwriting)."""
        if os.path.exists(self.config_filename):
            logger.error("Config is already exists!")
            if not confirm_prompt("Do you want to override?"):
                return
        with open(self.config_filename, 'w') as f:
            json.dump(schema, f, indent=2)
        logger.info("Config for openvpn manager created: %s", self.config_filename)
    @property
    def config_filename(self):
        """Absolute path of openvpn-manager.json in the current directory."""
        return os.path.join(os.getcwd(), 'openvpn-manager.json')
    def process(self, args):
        """Dispatches the parsed CLI args to the registered sub-command handler."""
        arg_data = args.__dict__
        command = arg_data['ovpn_command']
        del arg_data['ovpn_command']
        del arg_data['command']
        cli_handlers[command]['callback'](self, **arg_data)
| mit |
emilyvon/titanium_mobile | support/iphone/pbxproj.py | 36 | 4275 | #
# This is a little class that will parse out an XCode project.pbxproj file
# which is the proprietary metadata file for XCode projects
#
# Author: Jeff Haynie <jhaynie@appcelerator.com>
#
import os, uuid, sys, types, re
import StringIO
class PBXProj(object):
    """Patcher for XCode project.pbxproj files.

    Clones the project's existing libTiCore.a entries (file references,
    Frameworks build phase, group children and library search paths) once
    per static library registered via add_static_library().
    """
    def __init__(self):
        # List of (name, abs_path, dir, relative) tuples queued for parse().
        self.static_libs = []
    def gen_uuid(self):
        """Returns a fresh 24-hex-char upper-case id in XCode's object-id format."""
        genid = uuid.uuid4().__str__().upper().replace('-','')
        genid = genid[0:24]
        return genid
    def add_static_library(self,name,path,relative=False):
        """Queues static library |name| at |path| to be spliced in by parse()."""
        if path.find(name)==-1:
            path = os.path.abspath(os.path.join(path,name))
        self.static_libs.append((name,path,os.path.dirname(path),relative))
    def parse(self,f):
        """Returns the contents of project file |f| with the queued libraries
        added, duplicating each existing libTiCore.a line as a template."""
        contents = open(os.path.expanduser(f)).read()
        file_markers = []
        ref_markers = []
        framework_markers = []
        group_markers = []
        target_libs = []
        # Only add libraries that are not already mentioned in the project.
        for lib in self.static_libs:
            if contents.find(lib[0])==-1:
                target_libs.append(lib)
        if len(target_libs)==0:
            return contents
        # Collect the four kinds of libTiCore.a lines to use as templates.
        for line in contents.splitlines():
            # find our file marker
            if line.find("/* libTiCore.a */;")!=-1:
                file_markers.append(line)
            if line.find("/* libTiCore.a */ =")!=-1:
                ref_markers.append(line)
            if line.find("/* libTiCore.a in Frameworks */,")!=-1:
                framework_markers.append(line)
            if line.find("/* libTiCore.a */,")!=-1:
                group_markers.append(line)
        file_markers_to_file_refs = {}
        file_markers_to_frameworks = {}
        group_uuid = None
        for fm in file_markers:
            # NOTE(review): local 'uuid' shadows the imported uuid module
            # within this loop body.
            m = re.search(r'([0-9a-zA-Z]+) /*',fm)
            uuid = m.group(1).strip()
            if group_uuid==None:
                m = re.search(r'fileRef = ([0-9a-zA-Z]+) ',fm)
                group_uuid = m.group(1).strip()
            file_markers_to_file_refs[uuid]=self.gen_uuid()
        for lib in target_libs:
            libname = lib[0]
            libpath = lib[1]
            libdir = lib[2]
            isRelative = lib[3]
            new_group_uuid = self.gen_uuid()
            # Duplicate each template line with fresh ids and the new lib name.
            for fm in file_markers:
                begin = contents.find(fm)
                end = begin + len(fm)
                line = contents[begin:end]
                line = line.replace('libTiCore.a',libname)
                line = line.replace(group_uuid,new_group_uuid)
                m = re.search(r'([0-9a-zA-Z]+) /*',fm)
                new_uuid = self.gen_uuid()
                file_markers_to_file_refs[m.group(1)]=new_uuid
                line = line.replace(m.group(1),new_uuid)
                contents = contents[0:end] + '\n' + line + '\n' + contents[end+1:]
            for rm in ref_markers:
                begin = contents.find(rm)
                end = begin + len(rm)
                line = contents[begin:end]
                line = line.replace('lib/libTiCore.a',"\"%s\""%libpath)
                line = line.replace('libTiCore.a',libname)
                if not isRelative:
                    line = line.replace("SOURCE_ROOT","\"<absolute>\"")
                m = re.search(r'([0-9a-zA-Z]+) /*',rm)
                uuid = m.group(1).strip()
                line = line.replace(uuid,new_group_uuid)
                contents = contents[0:end] + '\n' + line + '\n' + contents[end+1:]
            for gm in group_markers:
                begin = contents.find(gm)
                end = begin + len(gm)
                line = contents[begin:end]
                line = line.replace('libTiCore.a',libname)
                line = line.replace(group_uuid,new_group_uuid)
                contents = contents[0:end] + '\n' + line + '\n' + contents[end+1:]
            for fm in framework_markers:
                m = re.search(r'([0-9a-zA-Z]+) /*',fm)
                fileRef = m.group(1).strip()
                new_uuid = file_markers_to_file_refs[fileRef]
                begin = contents.find(fm)
                end = begin + len(fm)
                line = contents[begin:end]
                line = line.replace('libTiCore.a',libname)
                line = line.replace(fileRef,new_uuid)
                contents = contents[0:end] + '\n' + line + '\n' + contents[end+1:]
            # Add the library's directory to every LIBRARY_SEARCH_PATHS list.
            libpath = "\"\\\"$(SRCROOT)/lib\\\"\","
            begin = contents.find(libpath)
            while begin>0:
                end = begin + len(libpath)
                line = contents[begin:end]
                line = line.replace(libpath,"\"\\\"%s\\\"\"," % libdir)
                contents = contents[0:end] + '\n ' + line + '\n' + contents[end+1:]
                begin = contents.find(libpath,end)
        return contents
if __name__ == "__main__":
    # Ad-hoc manual test: patches a local project file with two Titanium
    # module static libraries and writes the result to ~/tmp/foo.pbxproj.
    proj = PBXProj()
    f = "~/work/payswipe/build/iphone/payswipe.xcodeproj/project.pbxproj"
    proj.add_static_library('libmagtek.a','/Library/Application Support/Titanium/modules/iphone/magtek/1.0')
    proj.add_static_library('libpaint.a','/Library/Application Support/Titanium/modules/iphone/paint/1.0')
    out = proj.parse(f)
    o = open(os.path.expanduser("~/tmp/foo.pbxproj"),'w')
    o.write(out)
    o.close()
| apache-2.0 |
jsg7440/jQuery-ToDo | node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py | 1283 | 65086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
  """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
  and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
  result = {'$(ARCHS_STANDARD)': archs}
  if not archs_including_64_bit:
    return result
  result['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
  return result
class XcodeArchsDefault(object):
  """A class to resolve ARCHS variable from xcode_settings, resolving Xcode
  macros and implementing filtering by VALID_ARCHS. The expansion of macros
  depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
  on the version of Xcode.
  """
  # Match variable like $(ARCHS_STANDARD).
  variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
  def __init__(self, default, mac, iphonesimulator, iphoneos):
    # |default| is the ARCHS value used when the project sets none; the
    # other arguments are per-SDK variable-expansion mappings.
    self._default = (default,)
    self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
  def _VariableMapping(self, sdkroot):
    """Returns the dictionary of variable mapping depending on the SDKROOT."""
    sdkroot = sdkroot.lower()
    if 'iphoneos' in sdkroot:
      return self._archs['ios']
    elif 'iphonesimulator' in sdkroot:
      return self._archs['iossim']
    else:
      return self._archs['mac']
  def _ExpandArchs(self, archs, sdkroot):
    """Expands variables references in ARCHS, and remove duplicates."""
    variable_mapping = self._VariableMapping(sdkroot)
    expanded_archs = []
    for arch in archs:
      if self.variable_pattern.match(arch):
        variable = arch
        try:
          variable_expansion = variable_mapping[variable]
          # Order-preserving de-duplication of the expanded arches.
          for arch in variable_expansion:
            if arch not in expanded_archs:
              expanded_archs.append(arch)
        except KeyError as e:
          print 'Warning: Ignoring unsupported variable "%s".' % variable
      elif arch not in expanded_archs:
        expanded_archs.append(arch)
    return expanded_archs
  def ActiveArchs(self, archs, valid_archs, sdkroot):
    """Expands variables references in ARCHS, and filter by VALID_ARCHS if it
    is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
    values present in VALID_ARCHS are kept)."""
    expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
    if valid_archs:
      filtered_archs = []
      for arch in expanded_archs:
        if arch in valid_archs:
          filtered_archs.append(arch)
      expanded_archs = filtered_archs
    return expanded_archs
def GetXcodeArchsDefault():
  """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
  installed version of Xcode. The default values used by Xcode for ARCHS
  and the expansion of the variables depends on the version of Xcode used.
  For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included
  uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
  $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0
  and deprecated with Xcode 5.1.
  For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit
  architecture as part of $(ARCHS_STANDARD) and default to only building it.
  For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
  of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
  are also part of $(ARCHS_STANDARD).
  All those rules are coded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
  global XCODE_ARCHS_DEFAULT_CACHE
  if XCODE_ARCHS_DEFAULT_CACHE:
    return XCODE_ARCHS_DEFAULT_CACHE
  xcode_version, _ = XcodeVersion()
  # Version strings are zero-padded (e.g. '0500'), so plain string
  # comparison orders them correctly.
  if xcode_version < '0500':
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['i386']),
        XcodeArchsVariableMapping(['armv7']))
  elif xcode_version < '0510':
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s'],
            ['armv7', 'armv7s', 'arm64']))
  else:
    XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
        '$(ARCHS_STANDARD)',
        XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
        XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
        XcodeArchsVariableMapping(
            ['armv7', 'armv7s', 'arm64'],
            ['armv7', 'armv7s', 'arm64']))
  return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
  """A class that understands the gyp 'xcode_settings' object."""
  # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
  # at class-level for efficiency.
  _sdk_path_cache = {}
  # Reverse mapping: resolved SDK path -> SDKROOT name it came from.
  _sdk_root_cache = {}
  # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _plist_cache = {}
  # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
  # cached at class-level for efficiency.
  _codesigning_key_cache = {}
  def __init__(self, spec):
    """Caches per-configuration 'xcode_settings' dicts from a gyp target spec."""
    self.spec = spec
    self.isIOS = False
    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
      self._ConvertConditionalKeys(configname)
      # Any config that sets an iOS deployment target marks the whole
      # target as an iOS build.
      if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
                                             None):
        self.isIOS = True
    # This is only non-None temporarily during the execution of some methods.
    self.configname = None
    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
  def _ConvertConditionalKeys(self, configname):
    """Converts or warns on conditional keys. Xcode supports conditional keys,
    such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
    with some keys converted while the rest force a warning."""
    settings = self.xcode_settings[configname]
    conditional_keys = [key for key in settings if key.endswith(']')]
    for key in conditional_keys:
      # If you need more, speak up at http://crbug.com/122592
      if key.endswith("[sdk=iphoneos*]"):
        if configname.endswith("iphoneos"):
          new_key = key.split("[")[0]
          settings[new_key] = settings[key]
      else:
        print 'Warning: Conditional keys not implemented, ignoring:', \
              ' '.join(conditional_keys)
      # Conditional keys are always removed, whether converted or not.
      del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
  def _WarnUnimplemented(self, test_key):
    # Emits a warning when a setting that gyp does not emulate is present.
    if test_key in self._Settings():
      print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
  def GetWrapperExtension(self):
    """Returns the bundle extension (.app, .framework, .plugin, etc). Only
    valid for bundles."""
    assert self._IsBundle()
    if self.spec['type'] in ('loadable_module', 'shared_library'):
      default_wrapper_extension = {
        'loadable_module': 'bundle',
        'shared_library': 'framework',
      }[self.spec['type']]
      wrapper_extension = self.GetPerTargetSetting(
          'WRAPPER_EXTENSION', default=default_wrapper_extension)
      return '.' + self.spec.get('product_extension', wrapper_extension)
    elif self.spec['type'] == 'executable':
      # App/WatchKit extensions use '.appex'; regular applications '.app'.
      if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
        return '.' + self.spec.get('product_extension', 'appex')
      else:
        return '.' + self.spec.get('product_extension', 'app')
    else:
      assert False, "Don't know extension for '%s', target '%s'" % (
          self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
  def GetProductType(self):
    """Returns the PRODUCT_TYPE of this target."""
    # Extension/watch flavors take precedence over the plain type mapping.
    if self._IsIosAppExtension():
      assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.app-extension'
    if self._IsIosWatchKitExtension():
      assert self._IsBundle(), ('ios_watchkit_extension flag requires '
          'mac_bundle (target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.watchkit-extension'
    if self._IsIosWatchApp():
      assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
          '(target %s)' % self.spec['target_name'])
      return 'com.apple.product-type.application.watchapp'
    if self._IsBundle():
      return {
        'executable': 'com.apple.product-type.application',
        'loadable_module': 'com.apple.product-type.bundle',
        'shared_library': 'com.apple.product-type.framework',
      }[self.spec['type']]
    else:
      return {
        'executable': 'com.apple.product-type.tool',
        'loadable_module': 'com.apple.product-type.library.dynamic',
        'shared_library': 'com.apple.product-type.library.dynamic',
        'static_library': 'com.apple.product-type.library.static',
      }[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
  def _XcodeSdkPath(self, sdk_root):
    # Resolves an SDK name (e.g. 'macosx10.9') to its filesystem path via
    # xcodebuild, memoizing the result in the class-level caches.
    if sdk_root not in XcodeSettings._sdk_path_cache:
      sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
      XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
      if sdk_root:
        # Also remember the reverse mapping (path -> SDK name).
        XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
    return XcodeSettings._sdk_path_cache[sdk_root]
  def _AppendPlatformVersionMinFlags(self, lst):
    # Appends the -m*-version-min compiler flag matching the configured
    # deployment target (OS X and/or iOS) to |lst|.
    self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
    if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
      # TODO: Implement this better?
      # The simulator SDK needs a different flag than device SDKs.
      sdk_path_basename = os.path.basename(self._SdkPath())
      if sdk_path_basename.lower().startswith('iphonesimulator'):
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-mios-simulator-version-min=%s')
      else:
        self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
                      '-miphoneos-version-min=%s')
  def GetCflags(self, configname, arch=None):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations.

    Args:
      configname: The name of the configuration to compute cflags for.
      arch: Optional single architecture to target; when None, the
          configuration's active ARCHS list is used (fat binaries are
          unsupported and fall back to i386).
    """
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.
    self.configname = configname
    cflags = []
    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings() and sdk_root:
      cflags.append('-isysroot %s' % sdk_root)
    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')
    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')
    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento
    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')
    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # The dSYM itself is produced by a postbuild; the compile step is
        # identical to plain dwarf.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)
    if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
      cflags.append('-fstrict-aliasing')
    elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
      cflags.append('-fno-strict-aliasing')
    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')
    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')
    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')
    # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
    # llvm-gcc. It also requires a fairly recent libtool, and
    # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
    # path to the libLTO.dylib that matches the used clang.
    if self._Test('LLVM_LTO', 'YES', default='NO'):
      cflags.append('-flto')
    self._AppendPlatformVersionMinFlags(cflags)
    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')
    if arch is not None:
      archs = [arch]
    else:
      assert self.configname
      archs = self.GetActiveArchs(self.configname)
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])
    # SSE flags only make sense for Intel architectures.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')
    cflags += self._Settings().get('WARNING_CFLAGS', [])
    if sdk_root:
      framework_root = sdk_root
    else:
      framework_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
    self.configname = None
    return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't make c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
  def GetInstallName(self):
    """Return LD_DYLIB_INSTALL_NAME for this target, with the handful of
    supported $(...) variables expanded, or None when the target type
    doesn't take an install name."""
    # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
    if (self.spec['type'] != 'shared_library' and
        (self.spec['type'] != 'loadable_module' or self._IsBundle())):
      return None
    default_install_name = \
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
    install_name = self.GetPerTargetSetting(
        'LD_DYLIB_INSTALL_NAME', default=default_install_name)
    # Hardcode support for the variables used in chromium for now, to
    # unblock people using the make build.
    if '$' in install_name:
      # Only the two known spellings are accepted; anything else is an error.
      assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
          '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
          'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
          'yet in target \'%s\' (got \'%s\')' %
              (self.spec['target_name'], install_name))
      install_name = install_name.replace(
          '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
          self._StandardizePath(self.GetInstallNameBase()))
      if self._IsBundle():
        # These are only valid for bundles, hence the |if|.
        install_name = install_name.replace(
            '$(WRAPPER_NAME)', self.GetWrapperName())
        install_name = install_name.replace(
            '$(PRODUCT_NAME)', self.GetProductName())
      else:
        assert '$(WRAPPER_NAME)' not in install_name
        assert '$(PRODUCT_NAME)' not in install_name
      install_name = install_name.replace(
          '$(EXECUTABLE_PATH)', self.GetExecutablePath())
    return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
  def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
    """Returns flags that need to be passed to the linker.

    Args:
        configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such static and dynamic
            libraries are placed. This is added to the library search path.
        gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
        arch: Optional single architecture to link for; when None, the
            configuration's active ARCHS list is used (fat binaries are
            unsupported and fall back to i386).
    """
    self.configname = configname
    ldflags = []
    # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
    # can contain entries that depend on this. Explicitly absolutify these.
    for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
      ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
    if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
      ldflags.append('-Wl,-dead_strip')
    if self._Test('PREBINDING', 'YES', default='NO'):
      ldflags.append('-Wl,-prebind')
    self._Appendf(
        ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
    self._Appendf(
        ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
    self._AppendPlatformVersionMinFlags(ldflags)
    if 'SDKROOT' in self._Settings() and self._SdkPath():
      ldflags.append('-isysroot ' + self._SdkPath())
    for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
      ldflags.append('-L' + gyp_to_build_path(library_path))
    if 'ORDER_FILE' in self._Settings():
      ldflags.append('-Wl,-order_file ' +
                     '-Wl,' + gyp_to_build_path(
                                  self._Settings()['ORDER_FILE']))
    if arch is not None:
      archs = [arch]
    else:
      assert self.configname
      archs = self.GetActiveArchs(self.configname)
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    ldflags.append('-arch ' + archs[0])
    # Xcode adds the product directory by default.
    ldflags.append('-L' + product_dir)
    install_name = self.GetInstallName()
    if install_name and self.spec['type'] != 'loadable_module':
      ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
    for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
      ldflags.append('-Wl,-rpath,' + rpath)
    sdk_root = self._SdkPath()
    if not sdk_root:
      sdk_root = ''
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
    is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
    if sdk_root and is_extension:
      # Adds the link flags for extensions. These flags are common for all
      # extensions and provide loader and main function.
      # These flags reflect the compilation options used by xcode to compile
      # extensions.
      ldflags.append('-lpkstart')
      # NOTE(review): XcodeVersion() returns a (version, build) tuple, so
      # comparing it against the string '0900' never takes this branch under
      # Python 2's mixed-type ordering — presumably this was meant to be
      # XcodeVersion()[0] < '0900'; confirm intended.
      if XcodeVersion() < '0900':
        ldflags.append(sdk_root +
          '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
      ldflags.append('-fapplication-extension')
      ldflags.append('-Xlinker -rpath '
          '-Xlinker @executable_path/../../Frameworks')
    self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
    self.configname = None
    return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
  def _GetIOSPostbuilds(self, configname, output_binary):
    """Return a shell command to codesign the iOS output binary so it can
    be deployed to a device. This should be run as the very last step of the
    build."""
    # Only executables targeting iOS devices need to be signed here.
    if not (self.isIOS and self.spec['type'] == 'executable'):
      return []
    settings = self.xcode_settings[configname]
    key = self._GetIOSCodeSignIdentityKey(settings)
    if not key:
      return []
    # Warn for any unimplemented signing xcode keys.
    unimpl = ['OTHER_CODE_SIGN_FLAGS']
    unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
    if unimpl:
      print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
          ', '.join(sorted(unimpl)))
    # The actual signing is deferred to gyp-mac-tool at build time, when
    # ${TARGET_BUILD_DIR} is known.
    return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
        os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
        settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
        settings.get('CODE_SIGN_ENTITLEMENTS', ''),
        settings.get('PROVISIONING_PROFILE', ''))
    ]
  def _GetIOSCodeSignIdentityKey(self, settings):
    """Resolves CODE_SIGN_IDENTITY to a keychain fingerprint by querying the
    `security` tool, memoizing results in the class-level
    _codesigning_key_cache. Returns None when no identity is configured."""
    identity = settings.get('CODE_SIGN_IDENTITY')
    if not identity:
      return None
    if identity not in XcodeSettings._codesigning_key_cache:
      output = subprocess.check_output(
          ['security', 'find-identity', '-p', 'codesigning', '-v'])
      for line in output.splitlines():
        if identity in line:
          fingerprint = line.split()[1]
          cache = XcodeSettings._codesigning_key_cache
          # One identity mapping to several fingerprints is ambiguous; refuse.
          assert identity not in cache or fingerprint == cache[identity], (
              "Multiple codesigning fingerprints for identity: %s" % identity)
          XcodeSettings._codesigning_key_cache[identity] = fingerprint
    return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
  def _AdjustLibrary(self, library, config_name=None):
    """Transforms a single library reference ('Foo.framework',
    'libbar.dylib', ...) into linker flag form, expanding '$(SDKROOT)' and
    preferring a '.tbd' stub over a missing '.dylib' (see below)."""
    if library.endswith('.framework'):
      l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
    else:
      m = self.library_re.match(library)
      if m:
        l = '-l' + m.group(1)
      else:
        l = library
    sdk_root = self._SdkPath(config_name)
    if not sdk_root:
      sdk_root = ''
    # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
    # ".dylib" without providing a real support for them. What it does, for
    # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
    # library order and cause collision when building Chrome.
    #
    # Instead substitute ".tbd" for ".dylib" in the generated project when the
    # following conditions are both true:
    # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
    # - the ".dylib" file does not exist but a ".tbd" file does.
    library = l.replace('$(SDKROOT)', sdk_root)
    if l.startswith('$(SDKROOT)'):
      basename, ext = os.path.splitext(library)
      if ext == '.dylib' and not os.path.exists(library):
        tbd_library = basename + '.tbd'
        if os.path.exists(tbd_library):
          library = tbd_library
    return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
  def GetExtraPlistItems(self, configname=None):
    """Returns a dictionary with extra items to insert into Info.plist."""
    # The expensive lookups (sw_vers, xcodebuild queries) are memoized per
    # configname on the class, since they are identical for all targets.
    if configname not in XcodeSettings._plist_cache:
      cache = {}
      cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
      xcode, xcode_build = XcodeVersion()
      cache['DTXcode'] = xcode
      cache['DTXcodeBuild'] = xcode_build
      sdk_root = self._SdkRoot(configname)
      if not sdk_root:
        sdk_root = self._DefaultSdkRoot()
      cache['DTSDKName'] = sdk_root
      if xcode >= '0430':
        # Xcode 4.3+ can report the SDK's own build version.
        cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
            sdk_root, 'ProductBuildVersion')
      else:
        cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
      if self.isIOS:
        cache['DTPlatformName'] = cache['DTSDKName']
        if configname.endswith("iphoneos"):
          cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
              sdk_root, 'ProductVersion')
          cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
        else:
          cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
      XcodeSettings._plist_cache[configname] = cache
    # Include extra plist items that are per-target, not per global
    # XcodeSettings.
    items = dict(XcodeSettings._plist_cache[configname])
    if self.isIOS:
      items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
    return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
class MacPrefixHeader(object):
  """Helper for emulating Xcode's GCC_PREFIX_HEADER feature.

  This feature consists of several pieces:
  * If GCC_PREFIX_HEADER is present, all compilations in that project get an
    additional |-include path_to_prefix_header| cflag.
  * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
    instead compiled, and all other compilations in the project get an
    additional |-include path_to_compiled_header| instead.
    + Compiled prefix headers have the extension gch. There is one gch file
      for every language used in the project (c, cc, m, mm), since gch files
      for different languages aren't compatible.
    + gch files themselves are built with the target's normal cflags, but
      they obviously don't get the |-include| flag. Instead, they need a -x
      flag that describes their language.
    + All o files in the target need to depend on the gch file, to make sure
      it's built before any o file is built.

  This class helps with some of these tasks, but it needs help from the build
  system for writing dependencies to the gch files, for writing build commands
  for the gch files, and for figuring out the location of the gch files.
  """

  # Maps a source-file extension to its precompiled-header language code.
  _LANG_FOR_EXT = {
      '.c': 'c',
      '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
      '.m': 'm',
      '.mm': 'mm',
  }

  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None
    self.compile_headers = False
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      self.compile_headers = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        for lang in ('c', 'cc', 'm', 'mm'):
          self.compiled_headers[lang] = gyp_path_to_build_output(
              self.header, lang)
      self.header = gyp_path_to_build_path(self.header)

  def _CompiledHeader(self, lang, arch):
    """Returns the compiled header path for |lang|, with an optional
    per-arch suffix."""
    assert self.compile_headers
    compiled = self.compiled_headers[lang]
    if arch:
      compiled += '.' + arch
    return compiled

  def GetInclude(self, lang, arch=None):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      return '-include %s' % self._CompiledHeader(lang, arch)
    if self.header:
      return '-include %s' % self.header
    return ''

  def _Gch(self, lang, arch):
    """Returns the actual file name of the prefix header for language |lang|."""
    assert self.compile_headers
    return self._CompiledHeader(lang, arch) + '.gch'

  def GetObjDependencies(self, sources, objs, arch=None):
    """Given a list of source files and the corresponding object files, returns
    a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
    if not self.header or not self.compile_headers:
      return []
    dependencies = []
    for source, obj in zip(sources, objs):
      lang = self._LANG_FOR_EXT.get(os.path.splitext(source)[1])
      if lang:
        dependencies.append((source, obj, self._Gch(lang, arch)))
    return dependencies

  def GetPchBuildCommands(self, arch=None):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not self.header or not self.compile_headers:
      return []
    commands = []
    for lang, lang_flag in (('c', '-x c-header'),
                            ('cc', '-x c++-header'),
                            ('m', '-x objective-c-header'),
                            ('mm', '-x objective-c++-header')):
      commands.append((self._Gch(lang, arch), lang_flag, lang, self.header))
    return commands
def XcodeVersion():
  """Returns a tuple of version and build version of installed Xcode.

  Both elements are strings, e.g. ('0463', '4H1503'). Falls back to the
  Command Line Tools version (with an empty build string) when xcodebuild
  is unusable. The result is memoized in XCODE_VERSION_CACHE.
  """
  # `xcodebuild -version` output looks like
  #    Xcode 4.6.3
  #    Build version 4H1503
  # or like
  #    Xcode 3.2.6
  #    Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
  #    BuildVersion: 10M2518
  # Convert that to '0463', '4H1503'.
  global XCODE_VERSION_CACHE
  if XCODE_VERSION_CACHE:
    return XCODE_VERSION_CACHE
  try:
    version_list = GetStdout(['xcodebuild', '-version']).splitlines()
    # In some circumstances xcodebuild exits 0 but doesn't return
    # the right results; for example, a user on 10.7 or 10.8 with
    # a bogus path set via xcode-select
    # In that case this may be a CLT-only install so fall back to
    # checking that version.
    if len(version_list) < 2:
      raise GypError("xcodebuild returned unexpected results")
  except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; GetStdout raises GypError when xcodebuild fails.
    version = CLTVersion()
    if version:
      version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
    else:
      raise GypError("No Xcode or CLT version detected!")
    # The CLT has no build information, so we return an empty string.
    version_list = [version, '']
  version = version_list[0]
  build = version_list[-1]
  # Be careful to convert "4.2" to "0420":
  version = version.split()[-1].replace('.', '')
  version = (version + '0' * (3 - len(version))).zfill(4)
  if build:
    build = build.split()[-1]
  XCODE_VERSION_CACHE = (version, build)
  return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
  """Returns the version of command-line tools from pkgutil.

  The Mavericks, standalone, and from-Xcode package ids are checked in
  turn; the first version found is returned, or None when no CLT package
  is installed.
  """
  # pkgutil output looks like
  #   package-id: com.apple.pkg.CLTools_Executables
  #   version: 5.0.1.0.1.1382131676
  #   volume: /
  #   location: /
  #   install-time: 1382544035
  #   groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
  STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
  FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
  MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
  regex = re.compile('version: (?P<version>.+)')
  for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
    try:
      output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
      return re.search(regex, output).groupdict()['version']
    except Exception:
      # Narrowed from a bare `except:`; GetStdout raises GypError when the
      # package is absent, and re.search returns None (-> AttributeError)
      # on unexpected output. Try the next package id.
      continue
  return None
def GetStdout(cmdlist):
  """Returns the content of standard output returned by invoking |cmdlist|.
  Raises |GypError| if the command return with a non-zero return code."""
  process = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
  captured = process.communicate()[0]
  if process.returncode != 0:
    # Surface whatever the tool printed before reporting the failure.
    sys.stderr.write(captured + '\n')
    raise GypError('Error %d running %s' % (process.returncode, cmdlist[0]))
  return captured.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = dict(global_settings)
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns if |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  wants_bundle = int(spec.get('mac_bundle', 0)) != 0
  is_mac_bundle = wants_bundle and flavor == 'mac'
  if is_mac_bundle:
    # A bundle must actually produce something to package.
    assert spec['type'] != 'none', (
        'mac_bundle targets cannot have type none (target "%s")' %
        spec['target_name'])
  return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir,
                      xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangable.
    assert ' ' not in res, (
      "Spaces in resource filenames not supported (%s)" % res)
    res_dir, res_file = os.path.split(res)
    output = dest
    # If the resource lives in a .lproj bundle, add that to the destination.
    lproj_dir = os.path.split(res_dir)[1]
    if lproj_dir.endswith('.lproj'):
      output = os.path.join(output, lproj_dir)
    output = os.path.join(output, res_file)
    # Compiled XIB files are referred to by .nib.
    if output.endswith('.xib'):
      output = os.path.splitext(output)[0] + '.nib'
    # Compiled storyboard files are referred to by .storyboardc.
    if output.endswith('.storyboard'):
      output = os.path.splitext(output)[0] + '.storyboardc'
    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    # No Info.plist configured: nothing to copy, no defines, no env.
    return None, None, [], {}

  # The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # If explicitly set to preprocess the plist, invoke the C preprocessor and
  # specify any defines as -D flags.
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    # Create an intermediate file based on the path.
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
  else:
    defines = []

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  extra_env = xcode_settings.GetPerTargetSettings()

  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
      xcode_settings: An XcodeSettings object. If this is None, this function
          returns an empty dict.
      built_products_dir: Absolute path to the built products dir.
      srcroot: Absolute path to the source root.
      configuration: The build configuration name.
      additional_settings: An optional dict with more values to add to the
          result.  NOTE: mutated in place and returned when provided.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach
  # into its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_FRAMEWORKS_DIR' : built_products_dir,
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
  }
  # A per-configuration SDKROOT overrides the (empty) default.
  if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
    env['SDKROOT'] = xcode_settings._SdkPath(configuration)
  else:
    env['SDKROOT'] = ''

  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  if xcode_settings._IsBundle():
    env['CONTENTS_FOLDER_PATH'] = \
        xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base

  # NOTE(review): lexicographic string comparison of the Xcode version;
  # presumably XcodeVersion() always returns a zero-padded 4-digit string --
  # confirm, otherwise this comparison misorders versions.
  if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
    sdk_root = xcode_settings._SdkRoot(configuration)
    if not sdk_root:
      sdk_root = xcode_settings._XcodeSdkPath('')
    if sdk_root is None:
      sdk_root = ''
    env['SDKROOT'] = sdk_root

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  # Computed values win over caller-supplied ones.
  additional_settings.update(env)

  # Normalize every variable reference to the ${FOO} form.
  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
  """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
  expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in env --
  until no variables present in env are left."""
  # Walk the (key, value) pairs last-to-first, trying each reference style.
  for name, value in reversed(expansions):
    for pattern in ('${%s}' % name, '$(%s)' % name, '$' + name):
      string = string.replace(pattern, value)
  return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
  env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Throws an Exception in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge definition
    # that's inverted from the expected result of this function (see comment
    # above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  except gyp.common.CycleError, e:  # Python 2 except syntax (file is py2).
    raise GypError(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Like _GetXcodeEnv, but returns the environment as (key, value) pairs
  ordered so every variable appears after the variables it references."""
  env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot,
                     configuration, additional_settings)
  ordered_keys = _TopologicallySortedEnvVarKeys(env)
  return [(key, env[key]) for key in ordered_keys]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell."""
  cmds = []
  for step in spec.get('postbuilds', []):
    if not quiet:
      # Announce each postbuild step before running it.
      cmds.append('echo POSTBUILD\\(%s\\) %s' % (
          spec['target_name'], step['postbuild_name']))
    cmds.append(gyp.common.EncodePOSIXShellList(step['action']))
  return cmds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
  """If |target_dicts| contains any iOS targets, automatically create -iphoneos
  targets for iOS device builds."""
  if not _HasIOSTarget(target_dicts):
    # No iOS targets: hand back the input untouched.
    return target_dicts
  return _AddIOSDeviceConfigurations(target_dicts)
| mit |
rbheemana/Sqoop-Automated | code/workflows/wf_db_ingest/run_validations.py | 1 | 9437 | #!/opt/cloudera/parcels/Anaconda/bin/python
# Purpose: This Job is the starting point of all jobs.
# Looks up jobnames.list and calls the script
import sys, os, commands, datetime, time ,getpass, errno
from optparse import OptionParser
import subprocess
from subprocess import Popen, PIPE
def arg_handle():
    """Parse the command line options for this job.

    All options are supplied by the calling Oozie workflow; none are marked
    required, so a missing option surfaces later as a None value.
    """
    usage = "usage: run_validations.py [options]"
    parser = OptionParser(usage)
    # NOTE(review): several help strings below look copy-pasted
    # ("increment field max bound", "application name") and do not describe
    # their option.
    parser.add_option("-a", "--user", dest="user_name",
                      help="increment field max bound")
    parser.add_option("-b", "--domain", dest="domain",
                      help="application name")
    parser.add_option("-c", "--hv_db", dest="hv_db",
                      help="application name")
    parser.add_option("-d", "--hv_table", dest="hv_table",
                      help="application name")
    parser.add_option("-e", "--hv_db_stage", dest="hv_db_stage",
                      help="application name")
    parser.add_option("--validation_type", dest="validation_type",
                      help="validation type")
    parser.add_option("-f", "--stage_table", dest="stage_table",
                      help="application name")
    parser.add_option("--where_hadoop", dest="where_hadoop",
                      help="hive table where clause")
    parser.add_option("--jdbc_connect", dest="jdbc_connect",
                      help="jdbc_connect")
    parser.add_option("--table", dest="table",
                      help="table")
    parser.add_option("--where", dest="where",
                      help="where")
    parser.add_option("--username", dest="username",
                      help="username")
    parser.add_option("--datasource", dest="datasource",
                      help="datasource")
    parser.add_option("-g", "--keystore", dest="keystore",
                      help="application name")
    parser.add_option("-i", "--impalaConnect", dest="impalaConnect",
                      help="application name")
    (options, args) = parser.parse_args()
    print("run_validations.py -> Input : " + str(options))
    return options
def main():
    """Authenticate via Kerberos, refresh Impala metadata for the target
    tables, then run the validations selected by --validation_type."""
    print("run_validations.py -> Started Run_validations.py")
    # Hard-coded Anaconda paths so imports work under Oozie's stripped env.
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python27.zip')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/plat-linux2')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/lib-tk')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/lib-old')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/lib-dynload')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/site-packages')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/site-packages/Sphinx-1.3.5-py2.7.egg')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/site-packages/cryptography-1.0.2-py2.7-linux-x86_64.egg')
    sys.path.append('/opt/cloudera/parcels/Anaconda/lib/python2.7/site-packages/setuptools-25.1.6-py2.7.egg')
    rc, out = commands.getstatusoutput("echo $PATH")
    print("run_validations.py -> PATH Variable: "+out)
    sys.stdout.flush()
    options = arg_handle()
    # Remove any stale local keytab file, then fetch the latest one from HDFS.
    rm = "rm "+options.user_name.lower()+".keytab "
    rc, out = commands.getstatusoutput(rm)
    hget = "hdfs dfs -get "+options.keystore+"/"+options.user_name+".keytab "
    rc, out = commands.getstatusoutput(hget)
    print("run_validations.py -> Command : "+hget+"\n"+out+"\n RC:"+str(rc))
    sys.stdout.flush()
    # Authenticate using the keytab file.
    kerb = "kinit -k -t "+options.user_name+".keytab "+options.user_name+options.domain
    rc, out = commands.getstatusoutput(kerb)
    if rc == 0:
        print("run_validations.py -> Authenticated: "+kerb+" RC:"+str(rc))
    else:
        # NOTE(review): an authentication failure is only logged; the job
        # continues and fails later on the Impala call.
        print("run_validations.py -> ERROR : Authentication failed"+kerb+"\n"+out+"\n RC:"+str(rc))
    sys.stdout.flush()
    # Refresh Impala metadata for both the final and the staging table so the
    # subsequent counts see the freshly ingested data.
    impalaCmd=options.impalaConnect+'" refresh '+options.hv_db+"."+options.hv_table+";refresh "+options.hv_db_stage+"."+options.stage_table+';"'
    print "run_validations.py -> " + impalaCmd
    sys.stdout.flush()
    rc, out = commands.getstatusoutput(impalaCmd)
    if rc !=0:
        print("run_validations.py -> ERROR : Impala command failed"+str(out)+" RC:"+str(rc))
        sys.exit(1)
    print("run_validations.py -> SUCCESS : Tables refereshed")
    sys.stdout.flush()
    # Dispatch on the (substring-matched) validation type.  RDBMS-backed
    # validations only run for Oracle datasources.
    if 'default' in options.validation_type.lower():
        print("run_validations.py -> OPTION : Validate RAW Table and FINAL Table")
        validate_stg_final_tables(options)
    if 'full' in options.validation_type.lower() and options.datasource.strip().lower() == 'oracle':
        print("run_validations.py -> OPTION : Validate Total counts between RDBMS Source Table and HIVE FINAL Table")
        validate_full_rdbms_final_tables(options)
        sys.stdout.flush()
    if 'delta' in options.validation_type.lower() and options.datasource.strip().lower() == 'oracle':
        print("run_validations.py -> OPTION : Validate counts between RDBMS Source Table and HIVE RAW Table for rows we are importing")
        validate_delta_rdbms_final_tables(options)
        sys.stdout.flush()
def validate_stg_final_tables(options):
    """Compare row counts between the final Hive table (restricted by the
    partition where-clause) and the staging table; exit 4 on mismatch."""
    impalaCmd=options.impalaConnect+'"select count(*) from '+options.hv_db+"."+options.hv_table+" where " +options.where_hadoop+' ; "'
    print "run_validations.py -> COMMAND : " + impalaCmd
    rc, out = commands.getstatusoutput(impalaCmd)
    if rc !=0:
        print("run_validations.py -> ERROR : Impala command failed"+str(out)+" RC:"+str(rc))
        sys.exit(1)
    # impala-shell prints the count on the last line of its output.
    outputlist = out.split('\n')
    hadoop_final_count = str(outputlist[-1]).strip()
    print("run_validations.py -> RESULT : Count from Hive Table "+options.hv_db+"."+options.hv_table+":"+str(hadoop_final_count))
    sys.stdout.flush()
    impalaCmd=options.impalaConnect+"' refresh "+options.hv_db_stage+"."+options.stage_table+"; select count(*) from "+options.hv_db_stage+"."+options.stage_table+"; '"
    print "run_validations.py -> COMMAND : " + impalaCmd
    sys.stdout.flush()
    rc, out = commands.getstatusoutput(impalaCmd)
    if rc !=0:
        print("run_validations.py -> ERROR : Impala command failed"+str(out)+" RC:"+str(rc))
        sys.exit(1)
    outputlist = out.split('\n')
    hadoop_stage_count = str(outputlist[-1]).strip()
    print("run_validations.py -> RESULT : Count from Hive Stage Table "+options.hv_db_stage+"."+options.stage_table+":"+str(hadoop_stage_count))
    sys.stdout.flush()
    # String comparison of the two counts; exit code 4 signals a mismatch to
    # the workflow.
    if(hadoop_stage_count != hadoop_final_count):
        print("run_validations.py -> ERROR: Counts of Hive Stage Table and Hive Final Table do not match")
        sys.exit(4)
    print("run_validations.py -> SUCCESS : Hurray!! Counts of Hive Stage Table and Hive Final Table match..")
    sys.stdout.flush()
def validate_delta_rdbms_final_tables(options):
    """Compare the Hive staging-table count against the Oracle source count
    for the rows selected by --where; exit 4 on mismatch."""
    import cx_Oracle
    impalaCmd=options.impalaConnect+'"select count(*) from '+options.hv_db_stage+"."+options.stage_table+'; "'
    print "run_validations.py -> COMMAND : " + impalaCmd
    rc, out = commands.getstatusoutput(impalaCmd)
    if rc !=0:
        print("run_validations.py -> ERROR : Impala command failed"+str(out)+" RC:"+str(rc))
        sys.exit(1)
    # impala-shell prints the count on the last line of its output.
    outputlist = out.split('\n')
    hadoop_stage_count = str(outputlist[-1]).strip()
    print("run_validations.py -> RESULT : Count from Hive Table "+options.hv_db_stage+"."+options.stage_table+":"+str(hadoop_stage_count))
    rdbms_connect = options.jdbc_connect.strip().split('@')[1]
    # NOTE(review): the Oracle password is hard-coded here (and in
    # validate_full_rdbms_final_tables); it should come from a credential
    # store, not source code.
    db = cx_Oracle.connect(options.username.strip()+'/Hadoop#123@'+rdbms_connect)
    cursor = db.cursor()
    rdbms_query = "select count(*) from "+options.table+" where "+options.where
    print("run_validations.py -> COMMAND : "+rdbms_query)
    cursor.execute(rdbms_query)
    oracle_results = []
    for row in cursor:
        oracle_results.append(row[0])
    rdbms_oracle_count = str(oracle_results[0]).strip()
    print("run_validations.py -> RESULT : Count from RDBMS Table "+str(oracle_results[0]))
    if(rdbms_oracle_count != hadoop_stage_count):
        print("run_validations.py -> ERROR: Counts of RDBMS Table and Hive Final Table do not match")
        sys.exit(4)
    print("run_validations.py -> SUCCESS : Hurray!! Counts of RDBMS Table and Hive RAW Table match..")
def validate_full_rdbms_final_tables(options):
    """Compare the total row count of the final Hive table against the total
    row count of the Oracle source table; exit 4 on mismatch."""
    import cx_Oracle
    impalaCmd=options.impalaConnect+'"select count(*) from '+options.hv_db+"."+options.hv_table+'; "'
    print "run_validations.py -> COMMAND : " + impalaCmd
    rc, out = commands.getstatusoutput(impalaCmd)
    if rc !=0:
        print("run_validations.py -> ERROR : Impala command failed"+str(out)+" RC:"+str(rc))
        sys.exit(1)
    # impala-shell prints the count on the last line of its output.
    outputlist = out.split('\n')
    hadoop_final_count = str(outputlist[-1]).strip()
    print("run_validations.py -> RESULT : Count from Hive Table "+options.hv_db+"."+options.hv_table+":"+str(hadoop_final_count))
    rdbms_connect = options.jdbc_connect.strip().split('@')[1]
    # NOTE(review): hard-coded Oracle password (see also the delta variant);
    # move to a credential store.
    db = cx_Oracle.connect(options.username.strip()+'/Hadoop#123@'+rdbms_connect)
    cursor = db.cursor()
    rdbms_query = "select count(*) from "+options.table
    print("run_validations.py -> COMMAND : "+rdbms_query)
    cursor.execute(rdbms_query)
    oracle_results = []
    for row in cursor:
        oracle_results.append(row[0])
    rdbms_oracle_count = str(oracle_results[0]).strip()
    print("run_validations.py -> RESULT : Count from RDBMS Table "+str(oracle_results[0]))
    if(rdbms_oracle_count != hadoop_final_count):
        print("run_validations.py -> ERROR: Counts of RDBMS Table and Hive Final Table do not match")
        sys.exit(4)
    print("run_validations.py -> SUCCESS : Hurray!! Counts of RDBMS Table and Hive Final Table match..")
# Script entry point.
if __name__ == "__main__":
    main()
| apache-2.0 |
density215/d215-miniblog | unidecode/x091.py | 252 | 4655 | data = (
'Ruo ', # 0x00
'Bei ', # 0x01
'E ', # 0x02
'Yu ', # 0x03
'Juan ', # 0x04
'Yu ', # 0x05
'Yun ', # 0x06
'Hou ', # 0x07
'Kui ', # 0x08
'Xiang ', # 0x09
'Xiang ', # 0x0a
'Sou ', # 0x0b
'Tang ', # 0x0c
'Ming ', # 0x0d
'Xi ', # 0x0e
'Ru ', # 0x0f
'Chu ', # 0x10
'Zi ', # 0x11
'Zou ', # 0x12
'Ju ', # 0x13
'Wu ', # 0x14
'Xiang ', # 0x15
'Yun ', # 0x16
'Hao ', # 0x17
'Yong ', # 0x18
'Bi ', # 0x19
'Mo ', # 0x1a
'Chao ', # 0x1b
'Fu ', # 0x1c
'Liao ', # 0x1d
'Yin ', # 0x1e
'Zhuan ', # 0x1f
'Hu ', # 0x20
'Qiao ', # 0x21
'Yan ', # 0x22
'Zhang ', # 0x23
'Fan ', # 0x24
'Qiao ', # 0x25
'Xu ', # 0x26
'Deng ', # 0x27
'Bi ', # 0x28
'Xin ', # 0x29
'Bi ', # 0x2a
'Ceng ', # 0x2b
'Wei ', # 0x2c
'Zheng ', # 0x2d
'Mao ', # 0x2e
'Shan ', # 0x2f
'Lin ', # 0x30
'Po ', # 0x31
'Dan ', # 0x32
'Meng ', # 0x33
'Ye ', # 0x34
'Cao ', # 0x35
'Kuai ', # 0x36
'Feng ', # 0x37
'Meng ', # 0x38
'Zou ', # 0x39
'Kuang ', # 0x3a
'Lian ', # 0x3b
'Zan ', # 0x3c
'Chan ', # 0x3d
'You ', # 0x3e
'Qi ', # 0x3f
'Yan ', # 0x40
'Chan ', # 0x41
'Zan ', # 0x42
'Ling ', # 0x43
'Huan ', # 0x44
'Xi ', # 0x45
'Feng ', # 0x46
'Zan ', # 0x47
'Li ', # 0x48
'You ', # 0x49
'Ding ', # 0x4a
'Qiu ', # 0x4b
'Zhuo ', # 0x4c
'Pei ', # 0x4d
'Zhou ', # 0x4e
'Yi ', # 0x4f
'Hang ', # 0x50
'Yu ', # 0x51
'Jiu ', # 0x52
'Yan ', # 0x53
'Zui ', # 0x54
'Mao ', # 0x55
'Dan ', # 0x56
'Xu ', # 0x57
'Tou ', # 0x58
'Zhen ', # 0x59
'Fen ', # 0x5a
'Sakenomoto ', # 0x5b
'[?] ', # 0x5c
'Yun ', # 0x5d
'Tai ', # 0x5e
'Tian ', # 0x5f
'Qia ', # 0x60
'Tuo ', # 0x61
'Zuo ', # 0x62
'Han ', # 0x63
'Gu ', # 0x64
'Su ', # 0x65
'Po ', # 0x66
'Chou ', # 0x67
'Zai ', # 0x68
'Ming ', # 0x69
'Luo ', # 0x6a
'Chuo ', # 0x6b
'Chou ', # 0x6c
'You ', # 0x6d
'Tong ', # 0x6e
'Zhi ', # 0x6f
'Xian ', # 0x70
'Jiang ', # 0x71
'Cheng ', # 0x72
'Yin ', # 0x73
'Tu ', # 0x74
'Xiao ', # 0x75
'Mei ', # 0x76
'Ku ', # 0x77
'Suan ', # 0x78
'Lei ', # 0x79
'Pu ', # 0x7a
'Zui ', # 0x7b
'Hai ', # 0x7c
'Yan ', # 0x7d
'Xi ', # 0x7e
'Niang ', # 0x7f
'Wei ', # 0x80
'Lu ', # 0x81
'Lan ', # 0x82
'Yan ', # 0x83
'Tao ', # 0x84
'Pei ', # 0x85
'Zhan ', # 0x86
'Chun ', # 0x87
'Tan ', # 0x88
'Zui ', # 0x89
'Chuo ', # 0x8a
'Cu ', # 0x8b
'Kun ', # 0x8c
'Ti ', # 0x8d
'Mian ', # 0x8e
'Du ', # 0x8f
'Hu ', # 0x90
'Xu ', # 0x91
'Xing ', # 0x92
'Tan ', # 0x93
'Jiu ', # 0x94
'Chun ', # 0x95
'Yun ', # 0x96
'Po ', # 0x97
'Ke ', # 0x98
'Sou ', # 0x99
'Mi ', # 0x9a
'Quan ', # 0x9b
'Chou ', # 0x9c
'Cuo ', # 0x9d
'Yun ', # 0x9e
'Yong ', # 0x9f
'Ang ', # 0xa0
'Zha ', # 0xa1
'Hai ', # 0xa2
'Tang ', # 0xa3
'Jiang ', # 0xa4
'Piao ', # 0xa5
'Shan ', # 0xa6
'Yu ', # 0xa7
'Li ', # 0xa8
'Zao ', # 0xa9
'Lao ', # 0xaa
'Yi ', # 0xab
'Jiang ', # 0xac
'Pu ', # 0xad
'Jiao ', # 0xae
'Xi ', # 0xaf
'Tan ', # 0xb0
'Po ', # 0xb1
'Nong ', # 0xb2
'Yi ', # 0xb3
'Li ', # 0xb4
'Ju ', # 0xb5
'Jiao ', # 0xb6
'Yi ', # 0xb7
'Niang ', # 0xb8
'Ru ', # 0xb9
'Xun ', # 0xba
'Chou ', # 0xbb
'Yan ', # 0xbc
'Ling ', # 0xbd
'Mi ', # 0xbe
'Mi ', # 0xbf
'Niang ', # 0xc0
'Xin ', # 0xc1
'Jiao ', # 0xc2
'Xi ', # 0xc3
'Mi ', # 0xc4
'Yan ', # 0xc5
'Bian ', # 0xc6
'Cai ', # 0xc7
'Shi ', # 0xc8
'You ', # 0xc9
'Shi ', # 0xca
'Shi ', # 0xcb
'Li ', # 0xcc
'Zhong ', # 0xcd
'Ye ', # 0xce
'Liang ', # 0xcf
'Li ', # 0xd0
'Jin ', # 0xd1
'Jin ', # 0xd2
'Qiu ', # 0xd3
'Yi ', # 0xd4
'Diao ', # 0xd5
'Dao ', # 0xd6
'Zhao ', # 0xd7
'Ding ', # 0xd8
'Po ', # 0xd9
'Qiu ', # 0xda
'He ', # 0xdb
'Fu ', # 0xdc
'Zhen ', # 0xdd
'Zhi ', # 0xde
'Ba ', # 0xdf
'Luan ', # 0xe0
'Fu ', # 0xe1
'Nai ', # 0xe2
'Diao ', # 0xe3
'Shan ', # 0xe4
'Qiao ', # 0xe5
'Kou ', # 0xe6
'Chuan ', # 0xe7
'Zi ', # 0xe8
'Fan ', # 0xe9
'Yu ', # 0xea
'Hua ', # 0xeb
'Han ', # 0xec
'Gong ', # 0xed
'Qi ', # 0xee
'Mang ', # 0xef
'Ri ', # 0xf0
'Di ', # 0xf1
'Si ', # 0xf2
'Xi ', # 0xf3
'Yi ', # 0xf4
'Chai ', # 0xf5
'Shi ', # 0xf6
'Tu ', # 0xf7
'Xi ', # 0xf8
'Nu ', # 0xf9
'Qian ', # 0xfa
'Ishiyumi ', # 0xfb
'Jian ', # 0xfc
'Pi ', # 0xfd
'Ye ', # 0xfe
'Yin ', # 0xff
)
| bsd-3-clause |
user404d/CS6601-Project | local_opt.py | 3 | 1781 | import sys
import re

# Open the input circuit description: "-" means stdin, anything else is a
# file path.  NOTE(review): |file| shadows the Python 2 builtin of the same
# name.
if sys.argv[1] == "-":
    file = sys.stdin
else:
    file = open( sys.argv[1], 'r' )

# Lines starting with a digit are numeric literals, not wire names.
int_re = re.compile("^[0-9]")
var_map = {}          # wire name -> party assignment (0 = neutral)
party_mode = 0
could_be_local = {}   # gate output name -> party it can be computed locally by
local_lines = { 1 : "", 2: "" }   # per-party accumulated local gate lines
neutral = {}          # wires owned by neither party
linenum = 0
# First pass: classify every gate line by which party's inputs it uses.
for line in file.xreadlines():
    args = line.split()
    linenum += 1
    if len( args ) == 0:
        continue
    if args[0] == ".input":
        # .input <name> <party>: record which party owns the wire.
        var_map[ args[1] ] = int( args[2] )
    elif args[0] == ".remove":
        del var_map[ args[1] ]
    elif args[0][0] == ".":
        # Other directives are ignored in this pass.
        continue
    else:
        party_arg = []
        for arg in args[2:]:
            arg_party = 0
            if re.match(int_re, arg) != None:
                arg_party = 0
            elif not arg in var_map:
                raise Exception("cannot identify "+arg+" on line "+str(linenum) )
            elif var_map[arg] == 0:
                neutral[arg] = line
            party_arg.append( arg_party )
        # NOTE(review): |party_arg| is a list here, so "party_arg > 0" and
        # "party_arg != party_mode" are Python 2 cross-type comparisons, and
        # "local_lines[party_arg]" would raise TypeError (unhashable list) if
        # this branch were entered -- this looks like a bug; party_arg was
        # presumably meant to be the max of the per-argument parties.
        if party_arg > 0 and party_arg != party_mode:
            local_lines[party_arg] += line
            could_be_local[ args[0] ] = party_arg
        var_map[ args[0] ] = party_arg

# Second pass: re-emit the circuit, hoisting party-local gates into
# .startparty/.endparty sections just before the first ordinary gate line.
in_party = False
printed_locals = False
file.seek(0)
for line in file.xreadlines():
    args = line.split()
    if args[0] == ".startparty":
        in_party = True
    elif args[0] == ".endparty":
        in_party = False
    elif args[0] == ".remove" and args[1] in could_be_local:
        # The wire moved into a local section; drop its .remove directive.
        continue
    elif not printed_locals and not in_party and args[0][0] != ".":
        print ".startparty 1"
        print local_lines[1],
        print ".endparty 1"
        print ".startparty 2"
        print local_lines[2],
        print ".endparty 2"
        printed_locals = True
    if not args[0] in could_be_local:
        print line.strip()
| mit |
google/irrduino | IrrduinoServer/irrduinoserver/handlers/irrigatehandler.py | 7 | 2458 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the welcome page for the app."""
from google.appengine.ext import webapp
from irrduinoserver.utils import web as webutils
from irrduinoserver.utils import irrduino as irrduinoutils
from irrduinoserver.utils import ui as uiutils
SECS_PER_MINUTE = 60
# Watering duration limits: 1 second up to 10 minutes.
MAX_TIME_MINS = 10
MIN_TIME_SECS = 1
MAX_TIME_SECS = MAX_TIME_MINS * SECS_PER_MINUTE
class IrrigateHandler(webapp.RequestHandler):
    """Serves the manual-irrigation page and relays commands to
    IrrduinoController."""

    def get(self, template_params=None):
        # Render the irrigation page.  |template_params| lets post() feed the
        # controller response back into the page as a status message.
        if template_params is None:
            template_params = {}
        template_params["tabs"] = uiutils.generate_tabs("irrigate")
        template_params["zones"] = sorted(irrduinoutils.ZONES.items())
        # (seconds, minutes) pairs for the watering-duration dropdown.
        template_params["secs_and_mins"] = \
            [(mins * SECS_PER_MINUTE, mins)
             for mins in xrange(1, MAX_TIME_MINS + 1)]
        webutils.render_to_response(self, "irrigate.html", template_params)

    def post(self):
        """Control the sprinklers.

        Use assertions for IrrduinoController errors and ValueError exceptions
        for unexpected user input errors.
        """
        if self.request.get("get-system-status"):
            response = irrduinoutils.execute("/status")
            assert response
        elif self.request.get("water-zone"):
            zone = int(self.request.get("zone"))
            secs = int(self.request.get("secs"))
            if not zone in irrduinoutils.ZONES:
                raise ValueError("Invalid zone: %s" % zone)
            if not (MIN_TIME_SECS <= secs <= MAX_TIME_SECS):
                raise ValueError("Invalid secs: %s" % secs)
            response = irrduinoutils.execute("/zone%s/on/%ss" % (zone, secs))
            # The controller echoes back the zone state and duration.
            assert response["zone%s" % zone] == "on"
            assert int(response["time"]) == secs
        elif self.request.get("turn-off-everything"):
            response = irrduinoutils.execute("/off")
            assert response["zones"] == "ALL OFF"
            # NOTE(review): duplicated assertion; looks unintentional.
            assert response["zones"] == "ALL OFF"
        else:
            raise ValueError("Invalid submit button")
        # Re-render the page with the controller response as status.
        self.get({"status": response})
| apache-2.0 |
Akrog/cinder | cinder/api/contrib/services.py | 4 | 7077 | # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the os-services index response."""

    def construct(self):
        root = xmlutil.TemplateElement('services')
        elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
        elem.set('binary')
        elem.set('host')
        elem.set('zone')
        elem.set('status')
        elem.set('state')
        # Fixed typo: ServiceController.index emits 'updated_at', so the
        # previous 'update_at' selector never matched and the timestamp was
        # silently dropped from the XML response.
        elem.set('updated_at')
        elem.set('disabled_reason')
        return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the os-services update response."""

    def construct(self):
        # TODO(uni): template elements of 'host', 'service' and 'disabled'
        # should be deprecated to make ServicesUpdateTemplate consistent
        # with ServicesIndexTemplate. Still keeping it here for API
        # compatibility sake.
        root = xmlutil.TemplateElement('host')
        root.set('host')
        root.set('service')
        root.set('disabled')
        root.set('binary')
        root.set('status')
        root.set('disabled_reason')
        return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
    """os-services API controller: list services, enable/disable scheduling."""

    def __init__(self, ext_mgr=None):
        self.ext_mgr = ext_mgr
        super(ServiceController, self).__init__()

    @wsgi.serializers(xml=ServicesIndexTemplate)
    def index(self, req):
        """Return a list of all running services.

        Filter by host & service name.
        """
        context = req.environ['cinder.context']
        authorize(context)
        # 'os-extended-services' adds the disabled_reason field to the output.
        detailed = self.ext_mgr.is_loaded('os-extended-services')
        now = timeutils.utcnow()
        services = db.service_get_all(context)

        host = ''
        if 'host' in req.GET:
            host = req.GET['host']
        service = ''
        if 'service' in req.GET:
            service = req.GET['service']
            LOG.deprecated(_("Query by service parameter is deprecated. "
                             "Please use binary parameter instead."))
        binary = ''
        if 'binary' in req.GET:
            binary = req.GET['binary']

        if host:
            services = [s for s in services if s['host'] == host]
        # NOTE(uni): deprecating service request key, binary takes precedence
        binary_key = binary or service
        if binary_key:
            services = [s for s in services if s['binary'] == binary_key]

        svcs = []
        for svc in services:
            # A service is "up" if it has heartbeated within
            # CONF.service_down_time seconds.
            delta = now - (svc['updated_at'] or svc['created_at'])
            alive = abs(delta.total_seconds()) <= CONF.service_down_time
            art = (alive and "up") or "down"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            ret_fields = {'binary': svc['binary'], 'host': svc['host'],
                          'zone': svc['availability_zone'],
                          'status': active, 'state': art,
                          'updated_at': svc['updated_at']}
            if detailed:
                ret_fields['disabled_reason'] = svc['disabled_reason']
            svcs.append(ret_fields)
        return {'services': svcs}

    def _is_valid_as_reason(self, reason):
        # A disable reason must be a non-blank string of 1..255 characters.
        if not reason:
            return False
        try:
            utils.check_string_length(reason.strip(), 'Disabled reason',
                                      min_length=1, max_length=255)
        except exception.InvalidInput:
            return False

        return True

    @wsgi.serializers(xml=ServicesUpdateTemplate)
    def update(self, req, id, body):
        """Enable/Disable scheduling for a service."""
        context = req.environ['cinder.context']
        authorize(context)
        ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
        ret_val = {}
        # |id| is the action name, not a database id.
        if id == "enable":
            disabled = False
            status = "enabled"
            if ext_loaded:
                # Enabling clears any previously recorded disable reason.
                ret_val['disabled_reason'] = None
        elif (id == "disable" or
                (id == "disable-log-reason" and ext_loaded)):
            disabled = True
            status = "disabled"
        else:
            raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))

        try:
            host = body['host']
        except (TypeError, KeyError):
            raise webob.exc.HTTPBadRequest()

        ret_val['disabled'] = disabled
        if id == "disable-log-reason" and ext_loaded:
            reason = body.get('disabled_reason')
            if not self._is_valid_as_reason(reason):
                msg = _('Disabled reason contains invalid characters '
                        'or is too long')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            ret_val['disabled_reason'] = reason

        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        if not binary_key:
            raise webob.exc.HTTPBadRequest()

        try:
            svc = db.service_get_by_args(context, host, binary_key)
            if not svc:
                raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))

            db.service_update(context, svc['id'], ret_val)
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound(explanation=_("service not found"))

        ret_val.update({'host': host, 'service': service,
                        'binary': binary, 'status': status})
        return ret_val
class Services(extensions.ExtensionDescriptor):
    """Services support."""

    name = "Services"
    alias = "os-services"
    namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
    updated = "2012-10-28T00:00:00-00:00"

    def get_resources(self):
        # Expose ServiceController under the /os-services resource path.
        resources = []
        controller = ServiceController(self.ext_mgr)
        resource = extensions.ResourceExtension('os-services', controller)
        resources.append(resource)
        return resources
| apache-2.0 |
Nikoala/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/mechanize/_rfc3986.py | 134 | 7665 | """RFC 3986 URI parsing and relative reference resolution / absolutization.
(aka splitting and joining)
Copyright 2006 John J. Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
# XXX Wow, this is ugly. Overly-direct translation of the RFC ATM.
import re, urllib
## def chr_range(a, b):
## return "".join(map(chr, range(ord(a), ord(b)+1)))
## UNRESERVED_URI_CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
## "abcdefghijklmnopqrstuvwxyz"
## "0123456789"
## "-_.~")
## RESERVED_URI_CHARS = "!*'();:@&=+$,/?#[]"
## URI_CHARS = RESERVED_URI_CHARS+UNRESERVED_URI_CHARS+'%'
# this re matches any character that's not in URI_CHARS
BAD_URI_CHARS_RE = re.compile("[^A-Za-z0-9\-_.~!*'();:@&=+$,/?%#[\]]")
def clean_url(url, encoding):
    """Percent-encode illegal URI characters in *url* using *encoding*.

    Python 2 only: relies on ``urllib.quote`` and the str/unicode split
    (``str.decode``).  Returns a byte string safe to use as a URI.
    """
    # percent-encode illegal URI characters
    # Trying to come up with test cases for this gave me a headache, revisit
    # when do switch to unicode.

    # Somebody else's comments (lost the attribution):
    ## - IE will return you the url in the encoding you send it
    ## - Mozilla/Firefox will send you latin-1 if there's no non latin-1
    ## characters in your link. It will send you utf-8 however if there are...
    if type(url) == type(""):
        # Python 2 byte string: decode to unicode first, replacing bad bytes.
        url = url.decode(encoding, "replace")
    url = url.strip()
    # for second param to urllib.quote(), we want URI_CHARS, minus the
    # 'always_safe' characters that urllib.quote() never percent-encodes
    return urllib.quote(url.encode(encoding), "!*'();:@&=+$,/?%#[]~")
def is_clean_uri(uri):
    """
    >>> is_clean_uri("ABC!")
    True
    >>> is_clean_uri(u"ABC!")
    True
    >>> is_clean_uri("ABC|")
    False
    >>> is_clean_uri(u"ABC|")
    False
    >>> is_clean_uri("http://example.com/0")
    True
    >>> is_clean_uri(u"http://example.com/0")
    True
    """
    # note module re treats bytestrings as through they were decoded as latin-1
    # so this function accepts both unicode and bytestrings
    # Clean means: no character outside the RFC 3986 URI character set.
    return BAD_URI_CHARS_RE.search(uri) is None
SPLIT_MATCH = re.compile(
r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?").match
def urlsplit(absolute_uri):
    """Return scheme, authority, path, query, fragment."""
    match = SPLIT_MATCH(absolute_uri)
    if not match:
        return None
    groups = match.groups()
    # Groups 1/3/7/9 hold the components without their ':', '//', '?', '#'
    # delimiters; unmatched optional parts come back as None.
    return groups[1], groups[3], groups[4], groups[6], groups[8]
def urlunsplit(parts):
    """Recombine (scheme, authority, path, query, fragment) into a URI string.

    A component equal to None is omitted together with its delimiter; an
    empty string is kept (so ``("s", None, "", "", None)`` yields ``"s:?"``).
    """
    scheme, authority, path, query, fragment = parts
    pieces = []
    if scheme is not None:
        pieces.append(scheme + ":")
    if authority is not None:
        pieces.append("//" + authority)
    pieces.append(path)
    if query is not None:
        pieces.append("?" + query)
    if fragment is not None:
        pieces.append("#" + fragment)
    return "".join(pieces)
def urljoin(base_uri, uri_reference):
    """Join a base URI with a URI reference and return the resulting URI.

    See RFC 3986.
    """
    base_parts = urlsplit(base_uri)
    ref_parts = urlsplit(uri_reference)
    return urlunsplit(urljoin_parts(base_parts, ref_parts))
# oops, this doesn't do the same thing as the literal translation
# from the RFC below
## import posixpath
## def urljoin_parts(base_parts, reference_parts):
## scheme, authority, path, query, fragment = base_parts
## rscheme, rauthority, rpath, rquery, rfragment = reference_parts
## # compute target URI path
## if rpath == "":
## tpath = path
## else:
## tpath = rpath
## if not tpath.startswith("/"):
## tpath = merge(authority, path, tpath)
## tpath = posixpath.normpath(tpath)
## if rscheme is not None:
## return (rscheme, rauthority, tpath, rquery, rfragment)
## elif rauthority is not None:
## return (scheme, rauthority, tpath, rquery, rfragment)
## elif rpath == "":
## if rquery is not None:
## tquery = rquery
## else:
## tquery = query
## return (scheme, authority, tpath, tquery, rfragment)
## else:
## return (scheme, authority, tpath, rquery, rfragment)
def urljoin_parts(base_parts, reference_parts):
    """Resolve a split URI reference against split base-URI parts.

    Both arguments and the return value are (scheme, authority, path,
    query, fragment) 5-tuples as produced by urlsplit().  This is a direct
    transcription of the target-URI computation in RFC 3986 section 5.2.2.
    """
    scheme, authority, path, query, fragment = base_parts
    rscheme, rauthority, rpath, rquery, rfragment = reference_parts
    # Non-strict parser behaviour (RFC 3986 5.2.2): ignore a reference
    # scheme identical to the base scheme so relative resolution applies.
    if rscheme == scheme:
        rscheme = None
    if rscheme is not None:
        # Absolute reference: take everything from the reference.
        tscheme, tauthority, tpath, tquery = (
            rscheme, rauthority, remove_dot_segments(rpath), rquery)
    else:
        if rauthority is not None:
            # Network-path reference ("//host/..."): keep only base scheme.
            tauthority, tpath, tquery = (
                rauthority, remove_dot_segments(rpath), rquery)
        else:
            if rpath == "":
                # Empty path: keep base path; query only overrides if present.
                tpath = path
                if rquery is not None:
                    tquery = rquery
                else:
                    tquery = query
            else:
                if rpath.startswith("/"):
                    # Absolute-path reference.
                    tpath = remove_dot_segments(rpath)
                else:
                    # Relative-path reference: merge with base, then normalise.
                    tpath = merge(authority, path, rpath)
                    tpath = remove_dot_segments(tpath)
                tquery = rquery
            tauthority = authority
        tscheme = scheme
    # The fragment is always taken from the reference.
    tfragment = rfragment
    return (tscheme, tauthority, tpath, tquery, tfragment)
# um, something *vaguely* like this is what I want, but I have to generate
# lots of test cases first, if only to understand what it is that
# remove_dot_segments really does...
## def remove_dot_segments(path):
## if path == '':
## return ''
## comps = path.split('/')
## new_comps = []
## for comp in comps:
## if comp in ['.', '']:
## if not new_comps or new_comps[-1]:
## new_comps.append('')
## continue
## if comp != '..':
## new_comps.append(comp)
## elif new_comps:
## new_comps.pop()
## return '/'.join(new_comps)
def remove_dot_segments(path):
    """Interpret and remove "." and ".." segments from *path*.

    Implements the remove_dot_segments algorithm of RFC 3986 section
    5.2.4: the input is consumed from the left while completed segments
    accumulate in the output buffer.
    """
    output = []
    while path:
        # A: drop a leading "../" or "./".
        if path.startswith("../"):
            path = path[3:]
        elif path.startswith("./"):
            path = path[2:]
        # B: collapse "/./" (or trailing "/.") to "/".
        elif path.startswith("/./"):
            path = path[2:]
        elif path == "/.":
            path = "/"
        # C: "/../" (or trailing "/..") pops the last output segment.
        elif path.startswith("/../"):
            path = path[3:]
            if output:
                output.pop()
        elif path == "/..":
            path = "/"
            if output:
                output.pop()
        # D: a bare "." or ".." input is discarded entirely.
        elif path in (".", ".."):
            path = ""
        # E: move the first path segment (with its leading "/", if any)
        #    from the input to the output buffer.
        else:
            begin = 1 if path.startswith("/") else 0
            cut = path.find("/", begin)
            if cut < 0:
                output.append(path)
                path = ""
            else:
                output.append(path[:cut])
                path = path[cut:]
    return "".join(output)
def merge(base_authority, base_path, ref_path):
    """Merge a relative-path reference with the base path (RFC 3986 5.3).

    NOTE(review): base_authority is accepted but unused, mirroring Roy
    Fielding's sample Perl implementation; the RFC wording suggests it
    should matter when the base path is empty.
    """
    if base_path == "":
        return "/" + ref_path
    slash = base_path.rfind("/")
    if slash < 0:
        # No directory part in the base path: the reference stands alone.
        return ref_path
    # Replace everything after the last "/" of the base path.
    return base_path[:slash + 1] + ref_path
# When executed directly, run the module's doctests (e.g. is_clean_uri).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| gpl-3.0 |
thaim/ansible | lib/ansible/modules/network/fortios/fortios_router_access_list.py | 13 | 12427 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_access_list
short_description: Configure access lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and access_list category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
router_access_list:
description:
- Configure access lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comments:
description:
- Comment.
type: str
name:
description:
- Name.
required: true
type: str
rule:
description:
- Rule.
type: list
suboptions:
action:
description:
- Permit or deny this IP address and netmask prefix.
type: str
choices:
- permit
- deny
exact_match:
description:
- Enable/disable exact match.
type: str
choices:
- enable
- disable
flags:
description:
- Flags.
type: int
id:
description:
- Rule ID.
required: true
type: int
prefix:
description:
- IPv4 prefix to define regular filter criteria, such as "any" or subnets.
type: str
wildcard:
description:
- Wildcard to define Cisco-style wildcard filter criteria.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure access lists.
fortios_router_access_list:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
router_access_list:
comments: "<your_own_value>"
name: "default_name_4"
rule:
-
action: "permit"
exact_match: "enable"
flags: "8"
id: "9"
prefix: "<your_own_value>"
wildcard: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Sign in to the FortiGate via the legacy fortiosapi client *fos*.

    Reads host/username/password/ssl_verify (and optionally 'https')
    from the *data* mapping of module parameters.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS stays enabled unless the caller explicitly turned it off.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')

    fos.login(host, username, password, verify=ssl_verify)
def filter_router_access_list_data(json):
    """Return a copy of *json* restricted to the access-list API keys.

    Keys outside the whitelist, and keys whose value is None, are dropped.
    """
    option_list = ['comments', 'name', 'rule']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from underscore_case to hyphen-case.

    FortiOS expects hyphenated attribute names.  Dicts are rebuilt with
    converted keys; lists are converted element-by-element in place;
    scalars are returned unchanged.
    """
    if isinstance(data, list):
        # BUG FIX: the original rebound the loop variable
        # (``elem = underscore_to_hyphen(elem)``), which never stored the
        # converted dict back into the list.  Assign through the index.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def router_access_list(data, fos):
    """Create/update or delete a router access-list on the FortiGate.

    Resolves the desired state from the top-level 'state' parameter,
    falling back to the deprecated 'state' key nested inside the
    router_access_list payload, then issues fos.set / fos.delete.
    Returns the raw FortiOS response dict.
    """
    vdom = data['vdom']

    if 'state' in data and data['state']:
        state = data['state']
    elif data['router_access_list'] and 'state' in data['router_access_list']:
        # BUG FIX: the original tested membership before truthiness
        # ('state' in data['router_access_list'] and data['router_access_list']),
        # which raises TypeError when the payload is None.  Check the payload
        # first so the None case falls through to the default below.
        state = data['router_access_list']['state']
    else:
        # NOTE(review): with no state anywhere this stays True, matches
        # neither branch below and the function returns None — callers then
        # fail on the response dict.  Preserved for compatibility; confirm
        # whether a hard error would be more appropriate.
        state = True
    router_access_list_data = data['router_access_list']
    filtered_data = underscore_to_hyphen(filter_router_access_list_data(router_access_list_data))

    if state == "present":
        return fos.set('router',
                       'access-list',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('router',
                          'access-list',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """True if the FortiOS response reports success.

    Deleting an already-absent object (DELETE -> HTTP 404) also counts
    as success, making delete idempotent.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
    """Dispatch the router_access_list payload and normalise the result.

    Returns (is_error, changed, raw_response).
    """
    if data['router_access_list']:
        resp = router_access_list(data, fos)

    is_error = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return is_error, changed, resp
def main():
    """Ansible entry point: declare the argument spec and run the module.

    Chooses between the legacy fortiosapi client (when host/username/
    password are all supplied) and the persistent HTTPAPI connection.
    """
    # Argument spec mirrors the DOCUMENTATION block above; 'state' appears
    # both at the top level and (deprecated) inside router_access_list.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "router_access_list": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "comments": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "rule": {"required": False, "type": "list",
                         "options": {
                             "action": {"required": False, "type": "str",
                                        "choices": ["permit", "deny"]},
                             "exact_match": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                             "flags": {"required": False, "type": "int"},
                             "id": {"required": True, "type": "int"},
                             "prefix": {"required": False, "type": "str"},
                             "wildcard": {"required": False, "type": "str"}
                         }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_router(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: direct fortiosapi session (import deferred so the
        # dependency is only required when this path is taken).
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_router(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| mit |
rodorad/spark-tk | regression-tests/sparktkregtests/testcases/frames/column_method_drop_test.py | 14 | 3803 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests methods that access or alter columns"""
import unittest
from sparktkregtests.lib import sparktk_test
dummy_int_val = -77 # placeholder data value for added column
dummy_col_count = 1000 # length of dummy list for column add
# This method is to test different sources of functions
# i.e. global
def global_dummy_val_list(row):
    """Return dummy_col_count placeholder values; *row* is ignored."""
    return [dummy_int_val] * dummy_col_count
class ColumnMethodTest(sparktk_test.SparkTKTestCase):
    """Exercise frame column accessors and drop_columns behaviour.

    Each test gets a fresh 3-column frame (int, str, float) from setUp.
    """

    # Test class bound methods
    @staticmethod
    def static_dummy_val_list(row):
        # Mirrors global_dummy_val_list, but bound to the class, to check
        # that add_columns accepts functions from different sources.
        return [dummy_int_val for _ in range(0, dummy_col_count)]

    def setUp(self):
        """Build test_frame"""
        super(ColumnMethodTest, self).setUp()
        dataset = self.get_file("int_str_float.csv")
        schema = [("int", int), ("str", str), ("float", float)]

        self.frame = self.context.frame.import_csv(dataset, schema=schema)

    def test_column_names(self):
        """all original columns"""
        header = self.frame.column_names
        self.assertEqual(header, ['int', 'str', 'float'])

    def test_column_names_drop(self):
        """Exercise subsets of 1 and 2 columns"""
        self.frame.drop_columns('str')
        header = self.frame.column_names
        self.assertEqual(header, ['int', 'float'])

    def test_column_names_drop_multiple(self):
        """Drop multiple columns"""
        self.frame.drop_columns(['str', 'float'])
        header = self.frame.column_names
        self.assertEqual(header, ['int'])

    def test_drop_non_existent_column(self):
        """test dropping non-existent column"""
        # Error message text is part of the API contract being pinned here.
        with self.assertRaisesRegexp(
                ValueError, 'Invalid column name non-existent provided'):
            self.frame.drop_columns("non-existent")

    def test_drop_columns(self):
        """Test drop columns scenarios"""
        self.frame.add_columns(
            lambda row: dummy_int_val, ('product', int))

        col_count = len(self.frame.take(1)[0])
        self.frame.drop_columns(['int'])
        self.assertNotIn('int', self.frame.column_names)
        # Row width shrinks by exactly the one dropped column.
        self.assertEqual(col_count-1, len(self.frame.take(1)[0]))

    def test_drop_columns_multiple(self):
        """Test drop columns multiple, repeated"""
        self.frame.add_columns(
            lambda row: dummy_int_val, ('product', int))

        col_count = len(self.frame.take(1)[0])
        # 'str' listed twice: duplicates in the drop list must be harmless.
        self.frame.drop_columns(['str', 'product', 'str'])
        self.assertNotIn('str', self.frame.column_names)
        self.assertNotIn('product', self.frame.column_names)
        self.assertEqual(col_count-2, len(self.frame.take(1)[0]))

    def test_drop_zero_columns(self):
        """Test dropping no columns"""
        # Empty drop list is a no-op, not an error.
        self.frame.drop_columns([])
        header = self.frame.column_names
        self.assertEqual(header, ['int', 'str', 'float'])

    def test_drop_nonexistent_column(self):
        """Test drop non-existent column"""
        with self.assertRaisesRegexp(ValueError, 'Invalid column name'):
            self.frame.drop_columns(['no-such-name'])
# Standard unittest entry point for running this file directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
chokribr/invenioold | modules/miscutil/lib/memoiseutils.py | 23 | 1221 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Memoisation utilities.
"""
class Memoise:
    """
    Basic memoisation helper.
    Usage: fun = Memoise(fun)

    Results are cached per positional-argument tuple, so all arguments
    must be hashable.  Keyword arguments are not supported.
    """
    def __init__(self, function):
        """Wrap *function* with an empty result cache."""
        self.memo = {}
        self.function = function

    def __call__(self, *args):
        """Return the cached result for *args*, computing it on first use."""
        try:
            return self.memo[args]
        except KeyError:
            result = self.function(*args)
            self.memo[args] = result
            return result
| gpl-2.0 |
rajeevsingh717/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
    """Computes the median of a Pareto distribution.

    xmin: scale (minimum) parameter
    alpha: shape parameter
    """
    return xmin * 2 ** (1 / alpha)
def MakeExpoCdf():
    """Generates a plot of the exponential CDF.

    Draws one curve per rate parameter lambda and writes the figure via
    thinkplot.Save.  Relies on thinkplot's global figure state
    (PrePlot must precede the Plot calls).
    """
    thinkplot.PrePlot(3)
    for lam in [2.0, 1, 0.5]:
        xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
        label = r'$\lambda=%g$' % lam
        thinkplot.Plot(xs, ps, label=label)

    thinkplot.Save(root='analytic_expo_cdf',
                   title='Exponential CDF',
                   xlabel='x',
                   ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
    """Reads the babyboom data.

    filename: string

    returns: DataFrame
    """
    # (name, start column, end column, converter) for each fixed-width field;
    # 'minutes' is presumably time of birth in minutes since midnight —
    # TODO confirm against the data file's header (first 59 rows skipped).
    var_info = [
        ('time', 1, 8, int),
        ('sex', 9, 16, int),
        ('weight_g', 17, 24, int),
        ('minutes', 25, 32, int),
    ]
    columns = ['name', 'start', 'end', 'type']
    variables = pandas.DataFrame(var_info, columns=columns)
    # End positions in var_info are inclusive; FixedWidthVariables expects
    # exclusive ends, hence the +1.
    variables.end += 1
    dct = thinkstats2.FixedWidthVariables(variables, index_base=1)

    df = dct.ReadFixedWidth(filename, skiprows=59)
    return df
def MakeBabyBoom():
    """Plot CDF of interarrival time on log and linear scales.

    Produces a two-panel figure: CDF (linear) and CCDF (log-y); a straight
    CCDF on a log scale is the signature of an exponential distribution.
    """
    # compute the interarrival times
    df = ReadBabyBoom()
    diffs = df.minutes.diff()
    cdf = thinkstats2.Cdf(diffs, label='actual')

    # Left panel: plain CDF.
    thinkplot.PrePlot(cols=2)
    thinkplot.Cdf(cdf)
    thinkplot.Config(xlabel='minutes',
                     ylabel='CDF',
                     legend=False)

    # Right panel: complementary CDF on a log scale.
    thinkplot.SubPlot(2)
    thinkplot.Cdf(cdf, complement=True)
    thinkplot.Config(xlabel='minutes',
                     ylabel='CCDF',
                     yscale='log',
                     legend=False)

    thinkplot.Save(root='analytic_interarrivals',
                   legend=False)
def MakeParetoCdf():
    """Generates a plot of the Pareto CDF.

    One curve per shape parameter alpha, fixed scale xmin = 0.5.
    """
    xmin = 0.5

    thinkplot.PrePlot(3)
    for alpha in [2.0, 1.0, 0.5]:
        xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
        thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)

    thinkplot.Save(root='analytic_pareto_cdf',
                   title='Pareto CDF',
                   xlabel='x',
                   ylabel='CDF')
def MakeParetoCdf2():
    """Generates a plot of the CDF of height in Pareto World.

    Single Pareto curve with xmin = 100 cm, alpha = 1.7 (the book's
    "Pareto World" thought experiment).
    """
    xmin = 100
    alpha = 1.7
    xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
    thinkplot.Plot(xs, ps)

    thinkplot.Save(root='analytic_pareto_height',
                   title='Pareto CDF',
                   xlabel='height (cm)',
                   ylabel='CDF',
                   legend=False)
def MakeNormalCdf():
    """Generates a plot of the normal CDF.

    One curve per (mu, sigma) pair over the interval [-1, 4].
    """
    thinkplot.PrePlot(3)

    mus = [1.0, 2.0, 3.0]
    sigmas = [0.5, 0.4, 0.3]

    for mu, sigma in zip(mus, sigmas):
        xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
                                             low=-1.0, high=4.0)
        label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
        thinkplot.Plot(xs, ps, label=label)

    thinkplot.Save(root='analytic_normal_cdf',
                   title='Normal CDF',
                   xlabel='x',
                   ylabel='CDF',
                   loc=2)
def MakeNormalModel(weights):
    """Plot the CDF of birthweights with a normal model.

    weights: sequence of birth weights in pounds
    """
    # estimate parameters: trimming outliers yields a better fit
    mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    print('Mean, Var', mu, var)

    # plot the model (drawn first, in light gray, so the data sits on top)
    sigma = math.sqrt(var)
    print('Sigma', sigma)
    xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)

    thinkplot.Plot(xs, ps, label='model', color='0.8')

    # plot the data
    cdf = thinkstats2.Cdf(weights, label='data')

    thinkplot.PrePlot(1)
    thinkplot.Cdf(cdf)
    thinkplot.Save(root='analytic_birthwgt_model',
                   title='Birth weights',
                   xlabel='birth weight (lbs)',
                   ylabel='CDF')
def MakeExampleNormalPlot():
    """Generates a sample normal probability plot.

    Draws three synthetic normal samples; on a normal probability plot
    each should appear as a straight line with slope sigma, intercept mu.
    """
    n = 1000
    thinkplot.PrePlot(3)

    mus = [0, 1, 5]
    sigmas = [1, 1, 2]
    for mu, sigma in zip(mus, sigmas):
        sample = np.random.normal(mu, sigma, n)
        xs, ys = thinkstats2.NormalProbability(sample)
        label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
        thinkplot.Plot(xs, ys, label=label)

    thinkplot.Save(root='analytic_normal_prob_example',
                   title='Normal probability plot',
                   xlabel='standard normal sample',
                   ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
    """Generates a normal probability plot of birth weights.

    weights: all live births (lbs)
    term_weights: full-term births only (lbs)
    """
    mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    std = math.sqrt(var)

    # Reference line for a perfect normal fit, drawn first in gray.
    xs = [-4, 4]
    fxs, fys = thinkstats2.FitLine(xs, mean, std)
    thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')

    thinkplot.PrePlot(2)
    xs, ys = thinkstats2.NormalProbability(weights)
    thinkplot.Plot(xs, ys, label='all live')

    xs, ys = thinkstats2.NormalProbability(term_weights)
    thinkplot.Plot(xs, ys, label='full term')
    thinkplot.Save(root='analytic_birthwgt_normal',
                   title='Normal probability plot',
                   xlabel='Standard deviations from mean',
                   ylabel='Birth weight (lbs)')
def main():
    """Generate all of the book's analytic-distribution figures.

    Seeds the RNG for reproducible plots, then renders each figure and
    checks birth weights against a normal model.
    """
    thinkstats2.RandomSeed(18)

    MakeExampleNormalPlot()

    # make the analytic CDFs
    MakeExpoCdf()
    MakeBabyBoom()

    MakeParetoCdf()
    MakeParetoCdf2()
    MakeNormalCdf()

    # test the distribution of birth weights for normality
    preg = nsfg.ReadFemPreg()
    full_term = preg[preg.prglngth >= 37]

    weights = preg.totalwgt_lb.dropna()
    term_weights = full_term.totalwgt_lb.dropna()

    MakeNormalModel(weights)
    MakeNormalPlot(weights, term_weights)


if __name__ == "__main__":
    main()
| gpl-3.0 |
ownport/jira-reports | jirareports/vendor/requests/packages/urllib3/response.py | 150 | 22662 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
import logging
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
ResponseNotChunked, IncompleteRead, InvalidHeader
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Decoder for Content-Encoding: deflate that tolerates raw streams.

    Some servers send raw DEFLATE data without the zlib header.  The first
    decompress attempt uses standard zlib; if that fails, the decoder
    switches to headerless mode (-MAX_WBITS) and replays the bytes seen
    so far.
    """

    def __init__(self):
        self._first_try = True           # still probing for the stream format
        self._data = binary_type()       # bytes buffered for a possible replay
        self._obj = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress *data*; empty input passes through unchanged."""
        if not data:
            return data

        if not self._first_try:
            # Format already settled; decode directly.
            return self._obj.decompress(data)

        # Buffer input so it can be replayed if the zlib-wrapped attempt fails.
        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            # Fall back to a raw DEFLATE stream and replay the buffered bytes.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):
    """Decoder for Content-Encoding: gzip, wrapping a zlib decompressobj."""

    def __init__(self):
        # wbits = 16 + MAX_WBITS makes zlib expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress *data*; empty input passes through unchanged."""
        return self._obj.decompress(data) if data else data
def _get_decoder(mode):
    """Return the content decoder for *mode*: gzip, or deflate otherwise."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None,
                 retries=None, enforce_content_length=False, request_method=None):
        """Initialise the response wrapper; see the class docstring for the
        meaning of preload_content, decode_content, original_response,
        retries and enforce_content_length."""

        # Normalise headers into an HTTPHeaderDict without copying if the
        # caller already provided one.
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        self.retries = retries
        self.enforce_content_length = enforce_content_length

        self._decoder = None          # created lazily by _init_decoder()
        self._body = None             # cached, fully-read body bytes/str
        self._fp = None               # file-like source still to be read
        self._original_response = original_response
        self._fp_bytes_read = 0       # raw bytes pulled off the wire so far

        # A string/bytes body is stored directly ...
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body

        self._pool = pool
        self._connection = connection

        # ... while anything file-like is kept for streaming reads.
        if hasattr(body, 'read'):
            self._fp = body

        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True

        # Determine length of response
        self.length_remaining = self._init_length(request_method)

        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier: expose the whole
        # body as an attribute, reading and caching it lazily on first access.
        if self._body:
            return self._body

        if self._fp:
            return self.read(cache_content=True)
    @property
    def connection(self):
        # Read-only access to the low-level connection (may be None once
        # released back to the pool).
        return self._connection
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        # Counter is maintained by the read path, not computed here.
        return self._fp_bytes_read
    def _init_length(self, request_method):
        """
        Set initial length value for Response content if available.

        request_method: HTTP verb of the originating request ('HEAD' implies
        an empty body).  Returns the expected body length in bytes, or None
        when it cannot be determined (chunked, missing/invalid header).
        """
        length = self.headers.get('content-length')

        if length is not None and self.chunked:
            # This Response will fail with an IncompleteRead if it can't be
            # received as chunked. This method falls back to attempt reading
            # the response before raising an exception.
            log.warning("Received response with both Content-Length and "
                        "Transfer-Encoding set. This is expressly forbidden "
                        "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                        "attempting to process response as Transfer-Encoding: "
                        "chunked.")
            return None

        elif length is not None:
            try:
                # RFC 7230 section 3.3.2 specifies multiple content lengths can
                # be sent in a single Content-Length header
                # (e.g. Content-Length: 42, 42). This line ensures the values
                # are all valid ints and that as long as the `set` length is 1,
                # all values are the same. Otherwise, the header is invalid.
                lengths = set([int(val) for val in length.split(',')])
                if len(lengths) > 1:
                    raise InvalidHeader("Content-Length contained multiple "
                                        "unmatching values (%s)" % length)
                length = lengths.pop()
            except ValueError:
                # Non-numeric Content-Length: treat as unknown.
                length = None
            else:
                if length < 0:
                    length = None

        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0

        # Check for responses that shouldn't include a body
        # (1xx, 204, 304, and any response to a HEAD request).
        if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
            length = 0

        return length
def _init_decoder(self):
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content:
data += self._flush_decoder()
return data
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
buf = self._decoder.decompress(b'')
return buf + self._decoder.flush()
return b''
@contextmanager
def _error_catcher(self):
    """
    Catch low-level python exceptions, instead re-raising urllib3
    variants, so that low-level exceptions are not leaked in the
    high-level api.

    On exit, release the connection back to the pool.
    """
    # clean_exit stays False unless the wrapped body ran to completion;
    # any exception (even a re-raised one) leaves it False so the
    # finally-block tears the connection down instead of reusing it.
    clean_exit = False

    try:
        try:
            yield

        except SocketTimeout:
            # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
            # there is yet no clean way to get at it from this context.
            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except BaseSSLError as e:
            # FIXME: Is there a better way to differentiate between SSLErrors?
            if 'read operation timed out' not in str(e):  # Defensive:
                # This shouldn't happen but just in case we're missing an edge
                # case, let's avoid swallowing SSL errors.
                raise

            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except (HTTPException, SocketError) as e:
            # This includes IncompleteRead.
            raise ProtocolError('Connection broken: %r' % e, e)

        # If no exception is thrown, we should avoid cleaning up
        # unnecessarily.
        clean_exit = True
    finally:
        # If we didn't terminate cleanly, we need to throw away our
        # connection.
        if not clean_exit:
            # The response may not be closed but we're not going to use it
            # anymore so close it now to ensure that the connection is
            # released back to the pool.
            if self._original_response:
                self._original_response.close()

            # Closing the response may not actually be sufficient to close
            # everything, so if we have a hold of the connection close that
            # too.
            if self._connection:
                self._connection.close()

        # If we hold the original response but it's closed now, we should
        # return the connection back to the pool.
        if self._original_response and self._original_response.isclosed():
            self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
    """
    Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
    parameters: ``decode_content`` and ``cache_content``.

    :param amt:
        How much of the content to read. If specified, caching is skipped
        because it doesn't make sense to cache partial content as the full
        response.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.

    :param cache_content:
        If True, will save the returned data such that the same result is
        returned despite of the state of the underlying file object. This
        is useful if you want the ``.data`` property to continue working
        after having ``.read()`` the file object. (Overridden if ``amt`` is
        set.)
    """
    self._init_decoder()
    if decode_content is None:
        # Fall back to the instance-wide default chosen at construction.
        decode_content = self.decode_content

    if self._fp is None:
        return

    flush_decoder = False
    data = None

    with self._error_catcher():
        if amt is None:
            # cStringIO doesn't like amt=None
            data = self._fp.read()
            flush_decoder = True
        else:
            # Partial reads cannot be cached: .data would be incomplete.
            cache_content = False
            data = self._fp.read(amt)
            if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                # Close the connection when no data is returned
                #
                # This is redundant to what httplib/http.client _should_
                # already do. However, versions of python released before
                # December 15, 2012 (http://bugs.python.org/issue16298) do
                # not properly close the connection in all cases. There is
                # no harm in redundantly calling close.
                self._fp.close()
                flush_decoder = True
                if self.enforce_content_length and self.length_remaining not in (0, None):
                    # This is an edge case that httplib failed to cover due
                    # to concerns of backward compatibility. We're
                    # addressing it here to make sure IncompleteRead is
                    # raised during streaming, so all calls with incorrect
                    # Content-Length are caught.
                    raise IncompleteRead(self._fp_bytes_read, self.length_remaining)

    if data:
        # Track raw (wire) byte counts before any decoding happens.
        self._fp_bytes_read += len(data)
        if self.length_remaining is not None:
            self.length_remaining -= len(data)

        data = self._decode(data, decode_content, flush_decoder)

        if cache_content:
            self._body = data

    return data
def stream(self, amt=2**16, decode_content=None):
    """
    A generator wrapper for the read() method. A call will block until
    ``amt`` bytes have been read from the connection or until the
    connection is closed.

    :param amt:
        How much of the content to read. The generator will return up to
        much data per iteration, but may return less. This is particularly
        likely when using compressed data. However, the empty string will
        never be returned.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.
    """
    if self.chunked and self.supports_chunked_reads():
        # Chunked transfer-encoding carries its own framing; delegate.
        for chunk in self.read_chunked(amt, decode_content=decode_content):
            yield chunk
    else:
        # Plain body: keep reading until the underlying file is exhausted.
        while not is_fp_closed(self._fp):
            piece = self.read(amt=amt, decode_content=decode_content)
            if piece:
                yield piece
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
    """
    Given an :class:`httplib.HTTPResponse` instance ``r``, return a
    corresponding :class:`urllib3.response.HTTPResponse` object.

    Remaining parameters are passed to the HTTPResponse constructor, along
    with ``original_response=r``.
    """
    headers = r.msg
    if not isinstance(headers, HTTPHeaderDict):
        # Normalize httplib's header container into an HTTPHeaderDict.
        if PY3:  # Python 3
            headers = HTTPHeaderDict(headers.items())
        else:  # Python 2
            headers = HTTPHeaderDict.from_httplib(headers)

    # HTTPResponse objects in Python 3 don't have a .strict attribute.
    return ResponseCls(body=r,
                       headers=headers,
                       status=r.status,
                       version=r.version,
                       reason=r.reason,
                       strict=getattr(r, 'strict', 0),
                       original_response=r,
                       **response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
    """Backwards-compatibility shim for httplib: return the header mapping."""
    return self.headers
def getheader(self, name, default=None):
    """Backwards-compatibility shim for httplib: return one header value."""
    return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
    """Close the wrapped file object and, if held, the connection too."""
    if not self.closed:
        self._fp.close()

    conn = self._connection
    if conn:
        conn.close()
@property
def closed(self):
    """Whether the underlying file object is (or must be assumed) closed."""
    fp = self._fp
    if fp is None:
        return True
    if hasattr(fp, 'isclosed'):
        return fp.isclosed()
    if hasattr(fp, 'closed'):
        return fp.closed
    # No way to ask; treat an unknown file object as closed.
    return True
def fileno(self):
    """Return the file descriptor of the wrapped file, per io.IOBase."""
    fp = self._fp
    if fp is None:
        raise IOError("HTTPResponse has no file to get a fileno from")
    if hasattr(fp, "fileno"):
        return fp.fileno()
    raise IOError("The file-like object this HTTPResponse is wrapped "
                  "around has no file descriptor")
def flush(self):
    """Flush the wrapped file object, when it exists and supports it."""
    fp = self._fp
    if fp is not None and hasattr(fp, 'flush'):
        return fp.flush()
def readable(self):
    # This method is required for `io` module compatibility.
    # A response body is always considered readable.
    return True
def readinto(self, b):
    """Read into the buffer ``b`` (io module compatibility); return count."""
    chunk = self.read(len(b))
    n = len(chunk)
    if n:
        b[:n] = chunk
    return n
def supports_chunked_reads(self):
    """
    Checks if the underlying file-like object looks like a
    httplib.HTTPResponse object. We do this by testing for the fp
    attribute. If it is present we assume it returns raw chunks as
    processed by read_chunked().
    """
    # Only httplib-style bodies expose the raw file via `.fp`.
    return hasattr(self._fp, 'fp')
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
    """
    Similar to :meth:`HTTPResponse.read`, but with an additional
    parameter: ``decode_content``.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.
    """
    self._init_decoder()
    # FIXME: Rewrite this method and make it a class with a better structured logic.
    if not self.chunked:
        raise ResponseNotChunked(
            "Response is not chunked. "
            "Header 'transfer-encoding: chunked' is missing.")
    if not self.supports_chunked_reads():
        raise BodyNotHttplibCompatible(
            "Body should be httplib.HTTPResponse like. "
            "It should have have an fp attribute which returns raw chunks.")

    # Don't bother reading the body of a HEAD request.
    if self._original_response and is_response_to_head(self._original_response):
        self._original_response.close()
        return

    with self._error_catcher():
        while True:
            self._update_chunk_length()
            # A zero-length chunk marks the end of the chunked body.
            if self.chunk_left == 0:
                break
            chunk = self._handle_chunk(amt)
            decoded = self._decode(chunk, decode_content=decode_content,
                                   flush_decoder=False)
            if decoded:
                yield decoded

        if decode_content:
            # On CPython and PyPy, we should never need to flush the
            # decoder. However, on Jython we *might* need to, so
            # lets defensively do it anyway.
            decoded = self._flush_decoder()
            if decoded:  # Platform-specific: Jython.
                yield decoded

        # Chunk content ends with \r\n: discard it.
        while True:
            line = self._fp.fp.readline()
            if not line:
                # Some sites may not end with '\r\n'.
                break
            if line == b'\r\n':
                break

        # We read everything; close the "file".
        if self._original_response:
            self._original_response.close()
| mit |
efmflee/django-sspanel | ssserver/migrations/0001_initial.py | 1 | 4271 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-24 00:51
from __future__ import unicode_literals
import datetime
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import shadowsocks.tools
class Migration(migrations.Migration):
    """Initial schema: per-user Shadowsocks account plus per-node traffic log.

    Auto-generated by Django 1.11.4; the field definitions must stay exactly
    as generated so later migrations apply cleanly.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # NOTE(review): the short db_column names (u/d/t, passwd, ...) and the
        # explicit db_table names presumably mirror a pre-existing ss-panel
        # database layout — verify before renaming anything.
        migrations.CreateModel(
            name='SSUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('plan', models.CharField(choices=[('free', 'Free'), ('pro', 'pro')], default='Free', max_length=32, verbose_name='套餐')),
                ('last_check_in_time', models.DateTimeField(default=datetime.datetime(1970, 1, 1, 8, 0), editable=False, null=True, verbose_name='最后签到时间')),
                ('password', models.CharField(db_column='passwd', default=shadowsocks.tools.get_short_random_string, max_length=32, validators=[django.core.validators.MinLengthValidator(6)], verbose_name='Shadowsocks密码')),
                ('port', models.IntegerField(db_column='port', unique=True, verbose_name='端口')),
                ('last_use_time', models.IntegerField(db_column='t', default=0, editable=False, help_text='时间戳', verbose_name='最后使用时间')),
                ('upload_traffic', models.BigIntegerField(db_column='u', default=0, verbose_name='上传流量')),
                ('download_traffic', models.BigIntegerField(db_column='d', default=0, verbose_name='下载流量')),
                ('transfer_enable', models.BigIntegerField(db_column='transfer_enable', default=5368709120, verbose_name='总流量')),
                ('switch', models.BooleanField(db_column='switch', default=True, verbose_name='保留字段switch')),
                ('enable', models.BooleanField(db_column='enable', default=True, verbose_name='开启与否')),
                ('method', models.CharField(choices=[('aes-256-cfb', 'aes-256-cfb'), ('rc4-md5', 'rc4-md5'), ('salsa20', 'salsa20'), ('aes-128-ctr', 'aes-128-ctr')], default='aes-256-cfb', max_length=32, verbose_name='加密类型')),
                ('protocol', models.CharField(choices=[('auth_sha1_v4', 'auth_sha1_v4'), ('auth_aes128_md5', 'auth_aes128_md5'), ('auth_aes128_sha1', 'auth_aes128_sha1'), ('auth_chain_a', 'auth_chain_a'), ('origin', 'origin')], default='origin', max_length=32, verbose_name='协议')),
                ('obfs', models.CharField(choices=[('plain', 'plain'), ('http_simple', 'http_simple'), ('http_simple_compatible', 'http_simple_compatible'), ('http_post', 'http_post'), ('tls1.2_ticket_auth', 'tls1.2_ticket_auth')], default='plain', max_length=32, verbose_name='混淆')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='ss_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'SS账户',
                'db_table': 'user',
                'ordering': ('-last_check_in_time',),
            },
        ),
        migrations.CreateModel(
            name='TrafficLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField(verbose_name='用户id')),
                ('upload_traffic', models.BigIntegerField(db_column='u', default=0, verbose_name='上传流量')),
                ('download_traffic', models.BigIntegerField(db_column='d', default=0, verbose_name='下载流量')),
                ('node_id', models.IntegerField(verbose_name='节点id')),
                ('rate', models.FloatField(default=1.0, verbose_name='流量比例')),
                ('traffic', models.CharField(max_length=32, verbose_name='流量记录')),
                ('log_time', models.IntegerField(verbose_name='日志时间')),
            ],
            options={
                'verbose_name_plural': '流量记录',
                'db_table': 'user_traffic_log',
                'ordering': ('-log_time',),
            },
        ),
    ]
| gpl-3.0 |
laperry1/android_external_chromium_org | tools/android/memdump/memsymbols.py | 170 | 4857 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import os
import sys
import re
from optparse import OptionParser
"""Extracts the list of resident symbols of a library loaded in a process.
This scripts combines the extended output of memdump for a given process
(obtained through memdump -x PID) and the symbol table of a .so loaded in that
process (obtained through nm -C lib-with-symbols.so), filtering out only those
symbols that, at the time of the snapshot, were resident in memory (that is,
the symbols whose start address belongs to a mapped page of the .so which was
resident at the time of the snapshot).
The aim is to perform a "code coverage"-like profiling of a binary, intersecting
run-time information (list of resident pages) and debug symbols.
"""
_PAGE_SIZE = 4096
def _TestBit(word, bit):
assert(bit >= 0 and bit < 8)
return not not ((word >> bit) & 1)
def _HexAddr(addr):
return hex(addr)[2:].zfill(8)
def _GetResidentPagesSet(memdump_contents, lib_name, verbose):
  """Parses the memdump output and extracts the resident page set for lib_name.

  Args:
    memdump_contents: Array of strings (lines) of a memdump output.
    lib_name: A string containing the name of the library.so to be matched.
    verbose: Print a verbose header for each mapping matched.

  Returns:
    A set of resident pages (the key is the page index) for all the
    mappings matching .*lib_name.
  """
  resident_pages = set()
  MAP_RX = re.compile(
      r'^([0-9a-f]+)-([0-9a-f]+) ([\w-]+) ([0-9a-f]+) .* "(.*)" \[(.*)\]$')
  for line in memdump_contents:
    line = line.rstrip('\r\n')
    if line.startswith('[ PID'):
      continue

    r = MAP_RX.match(line)
    if not r:
      # Bug fix: the old message interpolated |memdump_file|, a name that is
      # not defined in this function, raising a NameError on any bad line.
      sys.stderr.write('Skipping unparsable memdump line: %s\n' % line)
      continue

    map_start = int(r.group(1), 16)
    map_end = int(r.group(2), 16)
    prot = r.group(3)
    offset = int(r.group(4), 16)
    assert offset % _PAGE_SIZE == 0
    lib = r.group(5)
    enc_bitmap = r.group(6)

    if not lib.endswith(lib_name):
      continue

    # bytearray yields int elements on both Python 2 and 3, so no ord() is
    # needed when indexing below.
    bitmap = bytearray(base64.b64decode(enc_bitmap))
    map_pages_count = (map_end - map_start + 1) // _PAGE_SIZE
    bitmap_pages_count = len(bitmap) * 8

    if verbose:
      print('Found %s: mapped %d pages in mode %s @ offset %s.' % (
          lib, map_pages_count, prot, _HexAddr(offset)))
      print('  Map range in the process VA: [%s - %s]. Len: %s' % (
          _HexAddr(map_start),
          _HexAddr(map_end),
          _HexAddr(map_pages_count * _PAGE_SIZE)))
      print('  Corresponding addresses in the binary: [%s - %s]. Len: %s' % (
          _HexAddr(offset),
          _HexAddr(offset + map_end - map_start),
          _HexAddr(map_pages_count * _PAGE_SIZE)))
      print('  Bitmap: %d pages' % bitmap_pages_count)
      print('')

    assert bitmap_pages_count >= map_pages_count
    for i in range(map_pages_count):
      bitmap_idx = i // 8
      bitmap_off = i % 8
      if (bitmap_idx < len(bitmap) and
          _TestBit(bitmap[bitmap_idx], bitmap_off)):
        resident_pages.add(offset // _PAGE_SIZE + i)
  return resident_pages
def main(argv):
  """Entry point: intersect a memdump snapshot with an nm symbol table.

  Prints every nm line whose symbol lies on a resident page (or the
  complement with --reverse). Returns a process exit code.
  """
  NM_RX = re.compile(r'^([0-9a-f]+)\s+.*$')

  parser = OptionParser()
  parser.add_option("-r", "--reverse",
                    action="store_true", dest="reverse", default=False,
                    help="Print out non present symbols")
  parser.add_option("-v", "--verbose",
                    action="store_true", dest="verbose", default=False,
                    help="Print out verbose debug information.")

  (options, args) = parser.parse_args()

  if len(args) != 3:
    print('Usage: %s [-v] memdump.file nm.file library.so' % (
        os.path.basename(argv[0])))
    return 1

  memdump_file = args[0]
  nm_file = args[1]
  lib_name = args[2]

  if memdump_file == '-':
    memdump_contents = sys.stdin.readlines()
  else:
    # Use a context manager so the file is not leaked.
    with open(memdump_file, 'r') as f:
      memdump_contents = f.readlines()
  resident_pages = _GetResidentPagesSet(memdump_contents,
                                        lib_name,
                                        options.verbose)

  # Process the nm symbol table, filtering out the resident symbols.
  with open(nm_file, 'r') as nm_fh:
    for line in nm_fh:
      line = line.rstrip('\r\n')

      # Skip undefined symbols (lines with no address).
      if line.startswith(' '):
        continue

      r = NM_RX.match(line)
      if not r:
        sys.stderr.write('Skipping %s from %s\n' % (line, nm_file))
        continue

      sym_addr = int(r.group(1), 16)
      sym_page = sym_addr // _PAGE_SIZE
      # (Removed a dead `last_sym_matched` local that was assigned but
      # never read.)
      if (sym_page in resident_pages) != options.reverse:
        print(line)
  return 0
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit code.
  sys.exit(main(sys.argv))
| bsd-3-clause |
lamby/jenkins.debian.net | bin/reproducible_html_indexes.py | 2 | 32086 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Mattia Rizzolo <mattia@mapreri.org>
# Copyright © 2015-2016 Holger Levsen <holger@layer-acht.org>
# Based on reproducible_html_indexes.sh © 2014 Holger Levsen <holger@layer-acht.org>
# Licensed under GPL-2
#
# Depends: python3
#
# Build quite all index_* pages
from reproducible_common import *
from sqlalchemy import select, and_, or_, func, bindparam, desc
"""
Reference doc for the following lists:
* queries is just a list of queries. They are referred further below.
+ every query must return only a list of package names (except count_total)
* pages is just a list of pages. It is actually a dictionary, where every
element is a page. Every page has:
+ `title`: The page title
+ `header`: (optional) sane html to be printed on top of the page
+ `header_query`: (optional): the output of this query is put inside "tot" of
the string above
+ `body`: a list of dicts containing every section that made up the page.
Every section has:
- `icon_status`: the name of a icon (see get_status_icon())
- `icon_link`: a link to hide below the icon
- `query`: query to perform against the reproducible db to get the list of
packages to show
- `text` a string. Template instance with $tot (total of packages listed)
and $percent (percentage of all packages)
- `timespan`: value set to '24' or '48' to enable adding $count, $count_total,
$timespan_count and $timespan_percent to the text, where:
* $percent becomes count/count_total
* $count_total being the number of all tested packages
* $count being the len() of the query indicated by `query2`
* $timespan_count is the number of packages tested in that timespan in hours
* $timespan_percent is the percentage of $query in that timespan
- `query2`: useful only if `timespan` is set to a value.
- `nosuite`: if true do not iterate over the suite/archs, use only the
current suite+arch
+ global: if true, then the page will saved on the root of rb.d.n, and:
- the query also takes the value "status"
- force the suite/arch to the defaults
+ notes: if true the query also takes the value "status"
Technically speaking, a page can be empty (we all love nonsense) but every
section must have at least a `query` defining what to file in.
"""
# Cutoff timestamps (as strings the DB can compare) for each supported
# timespan, keyed by the number of hours.
timespan_date_map = {
    hours: (datetime.now() - timedelta(hours=hours)).strftime('%Y-%m-%d %H:%M')
    for hours in (24, 48)
}

# sqlalchemy table definitions needed for queries
results = db_table('results')
sources = db_table('sources')
notes = db_table('notes')

# filtered_issues is defined in reproducible_common.py and can be used to
# exclude some FTBFS issues; fall back to [None] when nothing is filtered.
filter_issues_list = [
    notes.c.issues.contains(issue) for issue in filtered_issues
] or [None]

# Both base queries restrict to a single suite/architecture pair supplied
# via bind parameters at execution time.
_suite_arch_clause = and_(
    sources.c.suite == bindparam('suite'),
    sources.c.architecture == bindparam('arch')
)

count_results = select(
    [func.count(results.c.id)]
).select_from(
    results.join(sources)
).where(
    _suite_arch_clause
)

select_sources = select(
    [sources.c.name]
).select_from(
    results.join(sources)
).where(
    _suite_arch_clause
)
# Named SQLAlchemy query templates, looked up by name from the `pages`
# definitions below. Except for the count_* entries, each query yields a
# list of source package names; bind parameters are suite/arch and, where
# used, timespan_date or status.
queries = {
    "count_total": count_results,
    "count_timespan":
        count_results.where(
            results.c.build_date > bindparam('timespan_date'),
        ),
    # --- reproducible packages, by build date or alphabetically ---
    "reproducible_all":
        select_sources.where(
            results.c.status == 'reproducible',
        ).order_by(
            desc(results.c.build_date)
        ),
    "reproducible_last24h":
        select_sources.where(
            and_(
                results.c.status == 'reproducible',
                results.c.build_date > timespan_date_map[24],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "reproducible_last48h":
        select_sources.where(
            and_(
                results.c.status == 'reproducible',
                results.c.build_date > timespan_date_map[48],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "reproducible_all_abc":
        select_sources.where(
            results.c.status == 'reproducible',
        ).order_by(
            sources.c.name
        ),
    # --- FTBR: built, but not reproducibly ---
    "FTBR_all":
        select_sources.where(
            results.c.status == 'unreproducible',
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBR_last24h":
        select_sources.where(
            and_(
                results.c.status == 'unreproducible',
                results.c.build_date > timespan_date_map[24],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBR_last48h":
        select_sources.where(
            and_(
                results.c.status == 'unreproducible',
                results.c.build_date > timespan_date_map[48],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBR_all_abc":
        select_sources.where(
            results.c.status == 'unreproducible',
        ).order_by(
            sources.c.name
        ),
    # --- FTBFS: failed to build from source ---
    "FTBFS_all":
        select_sources.where(
            results.c.status == 'FTBFS',
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBFS_last24h":
        select_sources.where(
            and_(
                results.c.status == 'FTBFS',
                results.c.build_date > timespan_date_map[24],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBFS_last48h":
        select_sources.where(
            and_(
                results.c.status == 'FTBFS',
                results.c.build_date > timespan_date_map[48],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBFS_all_abc":
        select_sources.where(
            results.c.status == 'FTBFS',
        ).order_by(
            sources.c.name
        ),
    # FTBFS split by whether the failure is tagged with one of the
    # filtered_issues (i.e. expected failures caused by our own setup).
    "FTBFS_filtered":
        select_sources.where(
            and_(
                results.c.status == 'FTBFS',
                sources.c.id.notin_(
                    select(
                        [notes.c.package_id]
                    ).select_from(
                        notes
                    ).where(
                        or_(*filter_issues_list)
                    )
                )
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "FTBFS_caused_by_us":
        select_sources.where(
            and_(
                results.c.status == 'FTBFS',
                sources.c.id.in_(
                    select(
                        [notes.c.package_id]
                    ).select_from(
                        notes
                    ).where(
                        or_(*filter_issues_list)
                    )
                )
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    # --- download failures, dependency waits, skipped packages ---
    "404_all":
        select_sources.where(
            results.c.status == '404',
        ).order_by(
            desc(results.c.build_date)
        ),
    "404_all_abc":
        select_sources.where(
            results.c.status == '404',
        ).order_by(
            sources.c.name
        ),
    "depwait_all":
        select_sources.where(
            results.c.status == 'depwait',
        ).order_by(
            desc(results.c.build_date)
        ),
    "depwait_all_abc":
        select_sources.where(
            results.c.status == 'depwait',
        ).order_by(
            sources.c.name
        ),
    "depwait_last24h":
        select_sources.where(
            and_(
                results.c.status == 'depwait',
                results.c.build_date > timespan_date_map[24],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "depwait_last48h":
        select_sources.where(
            and_(
                results.c.status == 'depwait',
                results.c.build_date > timespan_date_map[48],
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "not_for_us_all":
        select_sources.where(
            and_(
                results.c.status == 'not for us',
            )
        ).order_by(
            sources.c.name
        ),
    "blacklisted_all":
        select_sources.where(
            results.c.status == 'blacklisted',
        ).order_by(
            sources.c.name
        ),
    # --- note-related queries; these additionally bind `status` ---
    "notes":
        select(
            [sources.c.name]
        ).select_from(
            sources.join(results).join(notes)
        ).where(
            and_(
                results.c.status == bindparam('status'),
                sources.c.suite == bindparam('suite'),
                sources.c.architecture == bindparam('arch')
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "no_notes":
        select_sources.where(
            and_(
                results.c.status == bindparam('status'),
                sources.c.id.notin_(select([notes.c.package_id]).select_from(notes))
            )
        ).order_by(
            desc(results.c.build_date)
        ),
    "notification":
        select_sources.where(
            and_(
                results.c.status == bindparam('status'),
                sources.c.notify_maintainer == 1
            )
        ).order_by(
            desc(results.c.build_date)
        ),
}
pages = {
'reproducible': {
'title': 'Packages in {suite}/{arch} which built reproducibly',
'body': [
{
'icon_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'reproducible_all',
'text': Template('$tot ($percent%) packages which built reproducibly in $suite/$arch:')
}
]
},
'FTBR': {
'title': 'Packages in {suite}/{arch} which failed to build reproducibly',
'body': [
{
'icon_status': 'FTBR',
'query': 'FTBR_all',
'text': Template('$tot ($percent%) packages which failed to build reproducibly in $suite/$arch:')
}
]
},
'FTBFS': {
'title': 'Packages in {suite}/{arch} which failed to build from source',
'body': [
{
'icon_status': 'FTBFS',
'query': 'FTBFS_filtered',
'text': Template('$tot ($percent%) packages which failed to build from source in $suite/$arch: (this list is filtered and only shows unexpected ftbfs issues - see the list below for expected failures.)')
},
{
'icon_status': 'FTBFS',
'query': 'FTBFS_caused_by_us',
'text': Template('$tot ($percent%) packages which failed to build from source in $suite/$arch due to our changes in the toolchain or due to our setup.\n This list includes packages tagged ' + filter_html + '.'),
}
]
},
'404': {
'title': 'Packages in {suite}/{arch} where the sources failed to download',
'body': [
{
'icon_status': '404',
'query': '404_all',
'text': Template('$tot ($percent%) packages where the sources failed to download in $suite/$arch:')
}
]
},
'depwait': {
'title': 'Packages in {suite}/{arch} where the build dependencies failed to be satisfied',
'body': [
{
'icon_status': 'depwait',
'query': 'depwait_all',
'text': Template('$tot ($percent%) packages where the build dependencies failed to be satisfied. Note that temporary failures (eg. due to network problems) are automatically rescheduled every 4 hours.')
}
]
},
'not_for_us': {
'title': 'Packages in {suite}/{arch} which should not be build on "{arch}"',
'body': [
{
'icon_status': 'not_for_us',
'query': 'not_for_us_all',
'text': Template('$tot ($percent%) packages which should not be build in $suite/$arch:')
}
]
},
'blacklisted': {
'title': 'Packages in {suite}/{arch} which have been blacklisted',
'body': [
{
'icon_status': 'blacklisted',
'query': 'blacklisted_all',
'text': Template('$tot ($percent%) packages which have been blacklisted in $suite/$arch: (If you see packages listed here without a bug filed against them, it \'s probably a good idea to file one.)')
}
]
},
'all_abc': {
'title': 'Alphabetically sorted overview of all tested packages in {suite}/{arch}',
'body': [
{
'icon_status': 'FTBR',
'icon_link': '/index_unreproducible.html',
'query': 'FTBR_all_abc',
'text': Template('$tot packages ($percent%) failed to build reproducibly in total in $suite/$arch:')
},
{
'icon_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'FTBFS_all_abc',
'text': Template('$tot packages ($percent%) failed to build from source in total $suite/$arch:')
},
{
'icon_status': 'not_for_us',
'icon_link': '/index_not_for_us.html',
'query': 'not_for_us_all',
'text': Template('$tot ($percent%) packages which should not be build in $suite/$arch:')
},
{
'icon_status': '404',
'icon_link': '/index_404.html',
'query': '404_all_abc',
'text': Template('$tot ($percent%) source packages could not be downloaded in $suite/$arch:')
},
{
'icon_status': 'depwait',
'icon_link': '/index_depwait.html',
'query': 'depwait_all_abc',
'text': Template('$tot ($percent%) source packages failed to satisfy their build-dependencies:')
},
{
'icon_status': 'blacklisted',
'icon_link': '/index_blacklisted.html',
'query': 'blacklisted_all',
'text': Template('$tot ($percent%) packages are blacklisted and will not be tested in $suite/$arch:')
},
{
'icon_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'reproducible_all_abc',
'text': Template('$tot ($percent%) packages successfully built reproducibly in $suite/$arch:')
},
]
},
'last_24h': {
'title': 'Packages in {suite}/{arch} tested in the last 24h for build reproducibility',
'body': [
{
'icon_status': 'FTBR',
'icon_link': '/index_unreproducible.html',
'query': 'FTBR_last24h',
'query2': 'FTBR_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to build reproducibly in total, $tot ($timespan_percent% of $timespan_count) of them in the last 24h in $suite/$arch:'),
'timespan': 24
},
{
'icon_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'FTBFS_last24h',
'query2': 'FTBFS_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to build from source in total, $tot ($timespan_percent% of $timespan_count) of them in the last 24h in $suite/$arch:'),
'timespan': 24
},
{
'icon_status': 'depwait',
'icon_link': '/index_depwait.html',
'query': 'depwait_last24h',
'query2': 'depwait_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to satisfy their build-dependencies, $tot ($timespan_percent% of $timespan_count) of them in the last 24h in $suite/$arch:'),
'timespan': 24
},
{
'icon_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'reproducible_last24h',
'query2': 'reproducible_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'successfully built reproducibly in total, $tot ($timespan_percent% of $timespan_count) of them in the last 24h in $suite/$arch:'),
'timespan': 24
},
]
},
'last_48h': {
'title': 'Packages in {suite}/{arch} tested in the last 48h for build reproducibility',
'body': [
{
'icon_status': 'FTBR',
'icon_link': '/index_unreproducible.html',
'query': 'FTBR_last48h',
'query2': 'FTBR_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to build reproducibly in total, $tot ($timespan_percent% of $timespan_count) of them in the last 48h in $suite/$arch:'),
'timespan': 48
},
{
'icon_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'FTBFS_last48h',
'query2': 'FTBFS_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to build from source in total, $tot ($timespan_percent% of $timespan_count) of them in the last 48h in $suite/$arch:'),
'timespan': 48
},
{
'icon_status': 'depwait',
'icon_link': '/index_depwait.html',
'query': 'depwait_last48h',
'query2': 'depwait_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'failed to satisfy their build-dependencies, $tot ($timespan_percent% of $timespan_count) of them in the last 48h in $suite/$arch:'),
'timespan': 48
},
{
'icon_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'reproducible_last48h',
'query2': 'reproducible_all',
'text': Template('$count packages ($percent% of ${count_total}) ' +
'successfully built reproducibly in total, $tot ($timespan_percent% of $timespan_count) of them in the last 48h in $suite/$arch:'),
'timespan': 48
},
]
},
'notes': {
'notes': True,
'title': 'Packages with notes',
'header': '<p>There are {tot} packages with notes in {suite}/{arch}.</p>',
'header_query': "SELECT count(*) FROM (SELECT s.name FROM sources AS s JOIN notes AS n ON n.package_id=s.id WHERE s.suite='{suite}' AND s.architecture='{arch}' GROUP BY s.name) AS tmp",
'body': [
{
'icon_status': 'FTBR',
'db_status': 'unreproducible',
'icon_link': '/index_FTBR.html',
'query': 'notes',
'nosuite': True,
'text': Template('$tot unreproducible packages in $suite/$arch, ordered by build date:')
},
{
'icon_status': 'FTBFS',
'db_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'notes',
'nosuite': True,
'text': Template('$tot FTBFS packages in $suite/$arch, ordered by build date:')
},
{
'icon_status': 'depwait',
'db_status': 'depwait',
'icon_link': '/index_depwait.html',
'query': 'depwait_all_abc',
'text': Template('$tot ($percent%) source packages failed to satisfy their build-dependencies, ordered by build date:')
},
{
'icon_status': 'not_for_us',
'db_status': 'not for us',
'icon_link': '/index_not_for_us.html',
'query': 'notes',
'nosuite': True,
'text': Template('$tot not for us packages in $suite/$arch:')
},
{
'icon_status': 'blacklisted',
'db_status': 'blacklisted',
'icon_link': '/index_blacklisted.html',
'query': 'notes',
'nosuite': True,
'text': Template('$tot blacklisted packages in $suite/$arch:')
},
{
'icon_status': 'reproducible',
'db_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'notes',
'nosuite': True,
'text': Template('$tot reproducible packages in $suite/$arch:')
}
]
},
'no_notes': {
'notes': True,
'notes_hint': True,
'title': 'Packages without notes',
'header': '<p>There are {tot} faulty packages without notes in {suite}/{arch}.{hint}</p>',
'header_query': "SELECT COUNT(*) FROM (SELECT s.id FROM sources AS s JOIN results AS r ON r.package_id=s.id WHERE r.status IN ('unreproducible', 'FTBFS', 'blacklisted') AND s.id NOT IN (SELECT package_id FROM notes) AND s.suite='{suite}' AND s.architecture='{arch}') AS tmp",
'body': [
{
'icon_status': 'FTBR',
'db_status': 'unreproducible',
'icon_link': '/index_FTBR.html',
'query': 'no_notes',
'text': Template('$tot unreproducible packages in $suite/$arch, ordered by build date:')
},
{
'icon_status': 'FTBFS',
'db_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'no_notes',
'text': Template('$tot FTBFS packages in $suite/$arch, ordered by build date:')
},
{
'icon_status': 'blacklisted',
'db_status': 'blacklisted',
'icon_link': '/index_blacklisted.html',
'query': 'no_notes',
'text': Template('$tot blacklisted packages in $suite/$arch, ordered by name:')
}
]
},
'notify': {
'global': True,
'notes': True,
'nosuite': True,
'title': 'Packages with notification enabled',
'header': '<p>The following {tot} packages have notifications enabled. (This page only shows packages in {suite}/{arch} though notifications are send for these packages in unstable and experimental in all tested architectures.) On status changes (e.g. reproducible → unreproducible) the system notifies the maintainer and relevant parties via an email to $srcpackage@packages.debian.org. Notifications are collected and send once a day to avoid flooding.<br />Please ask us to enable notifications for your package(s) in our IRC channel #debian-reproducible or via <a href="mailto:reproducible-builds@lists.alioth.debian.org">mail</a> - but ask your fellow team members first if they want to receive such notifications.</p>',
'header_query': "SELECT COUNT(*) FROM sources WHERE suite='{suite}' AND architecture='{arch}' AND notify_maintainer = 1",
'body': [
{
'icon_status': 'FTBR',
'db_status': 'unreproducible',
'icon_link': '/index_FTBR.html',
'query': 'notification',
'text': Template('$tot unreproducible packages in $suite/$arch:'),
'nosuite': True
},
{
'icon_status': 'FTBFS',
'db_status': 'FTBFS',
'icon_link': '/index_FTBFS.html',
'query': 'notification',
'text': Template('$tot FTBFS packages in $suite/$arch:'),
'nosuite': True
},
{
'icon_status': 'reproducible',
'db_status': 'reproducible',
'icon_link': '/index_reproducible.html',
'query': 'notification',
'text': Template('$tot reproducible packages in $suite/$arch:'),
'nosuite': True
}
]
}
}
def build_leading_text_section(section, rows, suite, arch):
    """
    Build the leading paragraph of one index-page section: the status icon
    (optionally wrapped in a link) followed by the summary sentence.

    section: one 'body' entry of a page definition (keys: 'icon_status',
             'icon_link', 'text', optionally 'timespan', 'query2', ...)
    rows:    rows returned by the section's main query, one per package
    suite, arch: suite/architecture the page is being built for
    Returns the HTML snippet as a string.
    """
    html = '<p>\n' + tab
    total = len(rows)
    # Denominator for the displayed percentage: every package ever tested
    # in this suite/arch.
    count_total = int(query_db(queries['count_total'].params({'suite': suite, 'arch': arch}))[0][0])
    try:
        # NOTE(review): relies on true division (Python 3); under Python 2
        # without `from __future__ import division` this would truncate.
        percent = round(((total/count_total)*100), 1)
    except ZeroDivisionError:
        log.error('Looks like there are either no tested package or no ' +
                  'packages available at all. Maybe it\'s a new database?')
        percent = 0.0
    try:
        html += '<a href="' + section['icon_link'] + '" target="_parent">'
        no_icon_link = False
    except KeyError:
        no_icon_link = True  # to avoid closing the </a> tag below
    if section.get('icon_status'):
        html += '<img src="/static/'
        html += get_status_icon(section['icon_status'])[1]
        html += '" alt="reproducible icon" />'
    if not no_icon_link:
        html += '</a>'
    html += '\n' + tab
    if section.get('text') and section.get('timespan'):
        # Timespan sections show two figures: `count` packages in this state
        # over all time (query2), and `total` of them within the timespan.
        count = len(query_db(queries[section['query2']].params(
            {'suite': suite, 'arch': arch})))
        percent = round(((count/count_total)*100), 1)
        timespan = section['timespan']
        timespan_date = timespan_date_map[timespan]
        # All packages tested within the timespan window, the denominator
        # for timespan_percent.
        timespan_count = int(query_db(queries['count_timespan'].params(
            {'suite': suite, 'arch': arch, 'timespan_date': timespan_date}))[0][0])
        try:
            timespan_percent = round(((total/timespan_count)*100), 1)
        except ZeroDivisionError:
            log.error('Looks like there are either no tested package or no ' +
                      'packages available at all. Maybe it\'s a new database?')
            timespan_percent = 0
        html += section['text'].substitute(tot=total, percent=percent,
                                           timespan_percent=timespan_percent,
                                           timespan_count=timespan_count,
                                           count_total=count_total,
                                           count=count, suite=suite, arch=arch)
    elif section.get('text'):
        html += section['text'].substitute(tot=total, percent=percent,
                                           suite=suite, arch=arch)
    else:
        log.warning('There is no text for this section')
    html += '\n</p>\n'
    return html
def build_page_section(page, section, suite, arch):
    """
    Build the HTML for one section of an index page: the leading text plus
    the list of package links.

    Returns a (html, footnote) tuple; footnote is True when the section
    produced at least one package, so the caller knows to add the footnote.
    """
    try:
        if pages[page].get('global') and pages[page]['global']:
            # Global pages are always rendered against the default
            # suite/arch, whatever was passed in.
            suite = defaultsuite
            arch = defaultarch
        if pages[page].get('notes') and pages[page]['notes']:
            # Notes-based queries additionally filter on the section's
            # db_status.
            query = queries[section['query']].params({
                'status': section['db_status'], 'suite': suite, 'arch': arch})
        else:
            query = queries[section['query']].params({'suite': suite, 'arch': arch})
        rows = query_db(query)
    except:
        # NOTE(review): bare except:; also, if the queries[...] lookup
        # itself fails, `query` is unbound here and this line raises
        # NameError instead of showing the failing query — worth tightening.
        print_critical_message('A query failed: %s' % query)
        raise
    html = ''
    footnote = True if rows else False
    if not rows:  # there are no package in this set
        log.debug('empty query: %s' % query)  # do not output anything.
        return (html, footnote)
    html += build_leading_text_section(section, rows, suite, arch)
    html += '<p>\n' + tab + '<code>\n'
    for row in rows:
        pkg = row[0]
        html += tab*2 + link_package(pkg, suite, arch, bugs)
    else:
        # NOTE(review): this for/else always runs (the loop has no break);
        # the closing tag could simply follow the loop.
        html += tab + '</code>\n'
    html += '</p>'
    if section.get('bottom'):
        html += section['bottom']
    # Re-indent every generated line by two tab units for readable output.
    html = (tab*2).join(html.splitlines(True))
    return (html, footnote)
def build_page(page, suite=None, arch=None):
    """
    Build and write one index page (identified by its key in `pages`).

    For 'global' pages suite/arch are forced to the defaults and every
    suite/arch combination is rendered into the same page; otherwise both
    suite and arch must be given.  The resulting HTML is written under
    DEBIAN_BASE and announced via log.info.
    """
    gpage = False
    if pages[page].get('global') and pages[page]['global']:
        gpage = True
        suite = defaultsuite
        arch = defaultarch
    if not gpage and suite and not arch:
        print_critical_message('The architecture was not specified while ' +
                               'building a suite-specific page.')
        sys.exit(1)
    if gpage:
        log.debug('Building the ' + page + ' global index page...')
        title = pages[page]['title']
    else:
        log.debug('Building the ' + page + ' index page for ' + suite + '/' +
                  arch + '...')
        title = pages[page]['title'].format(suite=suite, arch=arch)
    page_sections = pages[page]['body']
    html = ''
    footnote = False
    if pages[page].get('header'):
        # The notes hint is only shown on the default suite's page.
        if pages[page].get('notes_hint') and pages[page]['notes_hint'] and suite == defaultsuite:
            hint = ' <em>These</em> are the packages with failures that <em>still need to be investigated</em>.'
        else:
            hint = ''
        if pages[page].get('header_query'):
            html += pages[page]['header'].format(
                tot=query_db(pages[page]['header_query'].format(suite=suite, arch=arch))[0][0], suite=suite, arch=arch, hint=hint)
        else:
            html += pages[page].get('header')
    for section in page_sections:
        if gpage:
            if section.get('nosuite') and section['nosuite']:  # only defaults
                html += build_page_section(page, section, suite, arch)[0]
            else:
                # Render this section once per suite/arch combination.
                for suite in SUITES:
                    for arch in ARCHS:
                        log.debug('global page §' + section['db_status'] +
                                  ' in ' + page + ' for ' + suite + '/' + arch)
                        html += build_page_section(page, section, suite, arch)[0]
            footnote = True
        else:
            html1, footnote1 = build_page_section(page, section, suite, arch)
            html += html1
            footnote = True if footnote1 else footnote
    suite_arch_nav_template = None
    if gpage:
        destfile = DEBIAN_BASE + '/index_' + page + '.html'
        desturl = DEBIAN_URL + '/index_' + page + '.html'
        suite = defaultsuite  # used for the links in create_main_navigation
    else:
        destfile = DEBIAN_BASE + '/' + suite + '/' + arch + '/index_' + \
            page + '.html'
        desturl = DEBIAN_URL + '/' + suite + '/' + arch + '/index_' + \
            page + '.html'
        suite_arch_nav_template = DEBIAN_URI + '/{{suite}}/{{arch}}/index_' + \
            page + '.html'
    left_nav_html = create_main_navigation(
        suite=suite,
        arch=arch,
        displayed_page=page,
        suite_arch_nav_template=suite_arch_nav_template,
    )
    write_html_page(title=title, body=html, destfile=destfile, style_note=True,
                    left_nav_html=left_nav_html)
    log.info('"' + title + '" now available at ' + desturl)
# FIXME: module-level, so merely importing _html_indexes always queries UDD.
bugs = get_bugs()
if __name__ == '__main__':
    # First render every per-suite/arch page, then the global ones.
    for arch in ARCHS:
        for suite in SUITES:
            for name in pages.keys():
                if not pages[name].get('global'):
                    build_page(name, suite, arch)
    for name in pages.keys():
        if pages[name].get('global'):
            build_page(name)
| gpl-2.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/projections/__init__.py | 21 | 3371 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
from .polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
    """
    Manages the set of projections available to the system.
    """
    def __init__(self):
        # Mapping of projection name -> projection (axes) class.
        self._all_projection_types = {}

    def register(self, *projections):
        """
        Register a new set of projection(s).

        Each projection class must provide a ``name`` attribute, used as
        the lookup key; registering under an existing name replaces the
        previous entry.
        """
        for projection in projections:
            self._all_projection_types[projection.name] = projection

    def get_projection_class(self, name):
        """
        Get a projection class from its *name*.

        Raises KeyError if no projection is registered under *name*.
        """
        return self._all_projection_types[name]

    def get_projection_names(self):
        """
        Get a sorted list of the names of all projections currently
        registered.
        """
        # sorted() over the dict iterates its keys directly; equivalent on
        # Python 2 and 3 and avoids the six.iterkeys/list/sort dance.
        return sorted(self._all_projection_types)
# The single process-wide registry; all name -> projection-class lookups go
# through this instance.
projection_registry = ProjectionRegistry()
# Register the projections shipped with matplotlib, including the default
# rectilinear Axes.
projection_registry.register(
    axes.Axes,
    PolarAxes,
    AitoffAxes,
    HammerAxes,
    LambertAxes,
    MollweideAxes)
def register_projection(cls):
    """
    Register the projection class *cls* (keyed by its ``name`` attribute)
    with the module-level projection registry.
    """
    projection_registry.register(cls)
def get_projection_class(projection=None):
    """
    Look up a registered projection class by *projection* name.

    Passing ``None`` selects the standard 'rectilinear' projection.
    A name with no registered projection raises ValueError.
    """
    name = 'rectilinear' if projection is None else projection
    try:
        return projection_registry.get_projection_class(name)
    except KeyError:
        raise ValueError("Unknown projection '%s'" % name)
def process_projection_requirements(figure, *args, **kwargs):
    """
    Handle the args/kwargs to for add_axes/add_subplot/gca, returning::

        (axes_proj_class, proj_class_kwargs, proj_stack_key)

    which can be used for new axes initialization/identification.

    .. note:: **kwargs** is modified in place.
    """
    want_polar = kwargs.pop('polar', False)
    projection = kwargs.pop('projection', None)
    if want_polar:
        # polar=True and an explicit non-polar projection contradict each
        # other; reject that combination outright.
        if projection is not None and projection != 'polar':
            raise ValueError(
                "polar=True, yet projection=%r. "
                "Only one of these arguments should be supplied." %
                projection)
        projection = 'polar'

    if projection == 'polar':
        # Make sure the resolution keyword always appears in the key for
        # polar plots.
        kwargs.setdefault('resolution', 1)

    if projection is None or isinstance(projection, six.string_types):
        projection_class = get_projection_class(projection)
    elif hasattr(projection, '_as_mpl_axes'):
        projection_class, extra_kwargs = projection._as_mpl_axes()
        kwargs.update(**extra_kwargs)
    else:
        raise TypeError('projection must be a string, None or implement a '
                        '_as_mpl_axes method. Got %r' % projection)

    # Build the unique axes-lookup key from the remaining (projection-free)
    # args/kwargs.
    key = figure._make_key(*args, **kwargs)
    return projection_class, kwargs, key
def get_projection_names():
    """
    Get a list of acceptable projection names.

    Thin wrapper around the module-level registry; names come back sorted
    alphabetically.
    """
    return projection_registry.get_projection_names()
| mit |
Matt-Deacalion/django | tests/ordering/tests.py | 301 | 10033 | from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.db.models import F
from django.test import TestCase
from .models import Article, Author, Reference
class OrderingTests(TestCase):
    """
    Tests for QuerySet ordering: the model's default Meta.ordering,
    order_by() overrides (including F() expressions and random '?'),
    slicing, reverse(), extra()-based ordering, and ordering by foreign
    keys.  The Article/Author/Reference models come from ./models.py.
    """
    def setUp(self):
        # Four articles; a2 and a3 share a pub_date so headline acts as
        # the tie-breaker under the model's default ordering.
        self.a1 = Article.objects.create(
            headline="Article 1", pub_date=datetime(2005, 7, 26)
        )
        self.a2 = Article.objects.create(
            headline="Article 2", pub_date=datetime(2005, 7, 27)
        )
        self.a3 = Article.objects.create(
            headline="Article 3", pub_date=datetime(2005, 7, 27)
        )
        self.a4 = Article.objects.create(
            headline="Article 4", pub_date=datetime(2005, 7, 28)
        )
    def test_default_ordering(self):
        """
        By default, Article.objects.all() orders by pub_date descending, then
        headline ascending.
        """
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Article 4",
                "Article 2",
                "Article 3",
                "Article 1",
            ],
            attrgetter("headline")
        )
        # Getting a single item should work too:
        self.assertEqual(Article.objects.all()[0], self.a4)
    def test_default_ordering_override(self):
        """
        Override ordering with order_by, which is in the same format as the
        ordering attribute in models.
        """
        self.assertQuerysetEqual(
            Article.objects.order_by("headline"), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.order_by("pub_date", "-headline"), [
                "Article 1",
                "Article 3",
                "Article 2",
                "Article 4",
            ],
            attrgetter("headline")
        )
    def test_order_by_override(self):
        """
        Only the last order_by has any effect (since they each override any
        previous ordering).
        """
        self.assertQuerysetEqual(
            Article.objects.order_by("id"), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.order_by("id").order_by("-headline"), [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_stop_slicing(self):
        """
        Use the 'stop' part of slicing notation to limit the results.
        """
        self.assertQuerysetEqual(
            Article.objects.order_by("headline")[:2], [
                "Article 1",
                "Article 2",
            ],
            attrgetter("headline")
        )
    def test_stop_start_slicing(self):
        """
        Use the 'stop' and 'start' parts of slicing notation to offset the
        result list.
        """
        self.assertQuerysetEqual(
            Article.objects.order_by("headline")[1:3], [
                "Article 2",
                "Article 3",
            ],
            attrgetter("headline")
        )
    def test_random_ordering(self):
        """
        Use '?' to order randomly.
        """
        # Only the count is deterministic for a random ordering.
        self.assertEqual(
            len(list(Article.objects.order_by("?"))), 4
        )
    def test_reversed_ordering(self):
        """
        Ordering can be reversed using the reverse() method on a queryset.
        This allows you to extract things like "the last two items" (reverse
        and then take the first two).
        """
        self.assertQuerysetEqual(
            Article.objects.all().reverse()[:2], [
                "Article 1",
                "Article 3",
            ],
            attrgetter("headline")
        )
    def test_reverse_ordering_pure(self):
        # reverse() must also invert an ordering given as an F() expression.
        qs1 = Article.objects.order_by(F('headline').asc())
        qs2 = qs1.reverse()
        self.assertQuerysetEqual(
            qs1, [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            qs2, [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_extra_ordering(self):
        """
        Ordering can be based on fields included from an 'extra' clause
        """
        self.assertQuerysetEqual(
            Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
    def test_extra_ordering_quoting(self):
        """
        If the extra clause uses an SQL keyword for a name, it will be
        protected by quoting.
        """
        self.assertQuerysetEqual(
            Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
    def test_extra_ordering_with_table_name(self):
        self.assertQuerysetEqual(
            Article.objects.extra(order_by=['ordering_article.headline']), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.extra(order_by=['-ordering_article.headline']), [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_order_by_pk(self):
        """
        Ensure that 'pk' works as an ordering option in Meta.
        Refs #8291.
        """
        Author.objects.create(pk=1)
        Author.objects.create(pk=2)
        Author.objects.create(pk=3)
        Author.objects.create(pk=4)
        self.assertQuerysetEqual(
            Author.objects.all(), [
                4, 3, 2, 1
            ],
            attrgetter("pk")
        )
    def test_order_by_fk_attname(self):
        """
        Ensure that ordering by a foreign key by its attribute name prevents
        the query from inheriting its related model ordering option.
        Refs #19195.
        """
        # Assign authors 1..4 to articles a4..a1 so that ordering by
        # author_id inverts the article numbering.
        for i in range(1, 5):
            author = Author.objects.create(pk=i)
            article = getattr(self, "a%d" % (5 - i))
            article.author = author
            article.save(update_fields={'author'})
        self.assertQuerysetEqual(
            Article.objects.order_by('author_id'), [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_order_by_f_expression(self):
        self.assertQuerysetEqual(
            Article.objects.order_by(F('headline')), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.order_by(F('headline').asc()), [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        self.assertQuerysetEqual(
            Article.objects.order_by(F('headline').desc()), [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_order_by_f_expression_duplicates(self):
        """
        A column may only be included once (the first occurrence) so we check
        to ensure there are no duplicates by inspecting the SQL.
        """
        qs = Article.objects.order_by(F('headline').asc(), F('headline').desc())
        sql = str(qs.query).upper()
        fragment = sql[sql.find('ORDER BY'):]
        self.assertEqual(fragment.count('HEADLINE'), 1)
        self.assertQuerysetEqual(
            qs, [
                "Article 1",
                "Article 2",
                "Article 3",
                "Article 4",
            ],
            attrgetter("headline")
        )
        qs = Article.objects.order_by(F('headline').desc(), F('headline').asc())
        sql = str(qs.query).upper()
        fragment = sql[sql.find('ORDER BY'):]
        self.assertEqual(fragment.count('HEADLINE'), 1)
        self.assertQuerysetEqual(
            qs, [
                "Article 4",
                "Article 3",
                "Article 2",
                "Article 1",
            ],
            attrgetter("headline")
        )
    def test_related_ordering_duplicate_table_reference(self):
        """
        When an ordering traverses relations that reference the same model
        more than once, no spurious circular-reference error should be
        raised (#24654).
        """
        first_author = Author.objects.create()
        second_author = Author.objects.create()
        self.a1.author = first_author
        self.a1.second_author = second_author
        self.a1.save()
        self.a2.author = second_author
        self.a2.second_author = first_author
        self.a2.save()
        r1 = Reference.objects.create(article_id=self.a1.pk)
        r2 = Reference.objects.create(article_id=self.a2.pk)
        self.assertQuerysetEqual(Reference.objects.all(), [r2, r1], lambda x: x)
| bsd-3-clause |
espadrine/opera | chromium/src/build/android/PRESUBMIT.py | 59 | 1751 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for android buildbot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
_DELETIONS_ONLY_FILES = (
'build/android/findbugs_filter/findbugs_known_bugs.txt',
)
def _CheckDeletionsOnlyFiles(input_api, output_api):
"""Check that a certain listed files only have deletions.
"""
warnings = []
for f in input_api.AffectedFiles():
if f.LocalPath() in _DELETIONS_ONLY_FILES:
if f.ChangedContents():
warnings.append(f.LocalPath())
results = []
if warnings:
results.append(output_api.PresubmitPromptWarning(
'Following files should only contain deletions.', warnings))
return results
def CommonChecks(input_api, output_api):
  """Checks shared by the upload and commit hooks: pylint over this
  PRESUBMIT file and the buildbot scripts, the buildbot unit tests, and
  the deletions-only file check."""
  output = []
  def J(*dirs):
    """Returns a path relative to presubmit directory."""
    return input_api.os_path.join(input_api.PresubmitLocalPath(), *dirs)
  output.extend(input_api.canned_checks.RunPylint(
      input_api,
      output_api,
      white_list=[r'PRESUBMIT\.py$', r'buildbot/.*\.py$'],
      extra_paths_list=[
          J(), J('..', '..', 'third_party', 'android_testrunner'),
          J('buildbot')]))
  output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
      input_api, output_api, J('buildbot', 'tests')))
  output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))
  return output
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run at upload time; delegates to CommonChecks."""
  return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run at commit time; delegates to CommonChecks."""
  return CommonChecks(input_api, output_api)
| bsd-3-clause |
gohin/django | tests/csrf_tests/tests.py | 152 | 19350 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import (
CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token,
)
from django.template import RequestContext, Template
from django.template.context_processors import csrf
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import (
csrf_exempt, ensure_csrf_cookie, requires_csrf_token,
)
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
    """Return a text/html response containing a CSRF-token-less POST form."""
    # The keyword is ``content_type``: HttpResponse's ``mimetype`` keyword
    # was removed in Django 1.7, so passing mimetype= raises TypeError.
    resp = HttpResponse(content="""
    <html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
    """, content_type="text/html")
    return resp
def post_form_view(request):
    """A view that returns a POST form (without a token)"""
    # ``request`` is unused; the view only serves as a target callback for
    # CsrfViewMiddleware.process_view in the tests.
    return post_form_response()
# Response/views used for template tag tests
def token_view(request):
    """A view that uses {% csrf_token %}"""
    # Render through RequestContext with the csrf context processor so the
    # template tag can resolve the token for this request.
    context = RequestContext(request, processors=[csrf])
    template = Template("{% csrf_token %}")
    return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
    """
    A view that doesn't use the token, but does use the csrf view processor.
    """
    # The empty template never calls {% csrf_token %}, so get_token() is
    # never triggered even though the processor is installed.
    context = RequestContext(request, processors=[csrf])
    template = Template("")
    return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
    """
    A version of HttpRequest that allows us to change some things
    more easily
    """
    def is_secure(self):
        # Tests set ``_is_secure_override`` on an instance to simulate an
        # HTTPS request; it defaults to False (plain HTTP).
        return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(SimpleTestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
Check that if the token is longer than expected, it is ignored and
a new token is created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['httponly'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertIn('Cookie', resp2.get('Vary', ''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookies is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token is present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present and no token, but the csrf_exempt
decorator has been applied to the view, the middleware lets it through
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
settings.CSRF_HEADER_NAME can be used to customize the CSRF header name
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNone(req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = b""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertIsNotNone(req2)
self.assertEqual(403, req2.status_code)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_malformed_referer(self):
    """A POST HTTPS request with a malformed referer is rejected."""
    request = self._get_POST_request_with_token()
    request._is_secure_override = True
    request.META['HTTP_HOST'] = 'www.example.com'
    # Syntactically invalid referer (doubled scheme).
    request.META['HTTP_REFERER'] = 'http://http://www.example.com/'
    rejection = CsrfViewMiddleware().process_view(request, post_form_view, (), {})
    self.assertIsNotNone(rejection)
    self.assertEqual(403, rejection.status_code)
    # Non-ASCII
    request.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf'
    rejection = CsrfViewMiddleware().process_view(request, post_form_view, (), {})
    self.assertIsNotNone(rejection)
    self.assertEqual(403, rejection.status_code)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
    """A POST HTTPS request with a good referer is accepted."""
    request = self._get_POST_request_with_token()
    request._is_secure_override = True
    request.META['HTTP_HOST'] = 'www.example.com'
    request.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
    # None from process_view means the request was not rejected.
    self.assertIsNone(
        CsrfViewMiddleware().process_view(request, post_form_view, (), {}))
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
    """
    A POST HTTPS request with a good referer is accepted even when the
    referer has no trailing slash (refs ticket #15617).
    """
    request = self._get_POST_request_with_token()
    request._is_secure_override = True
    request.META['HTTP_HOST'] = 'www.example.com'
    request.META['HTTP_REFERER'] = 'https://www.example.com'
    self.assertIsNone(
        CsrfViewMiddleware().process_view(request, post_form_view, (), {}))
def test_ensures_csrf_cookie_no_middleware(self):
    """
    The ensure_csrf_cookie decorator fulfils its promise even with no
    middleware involved.
    """
    @ensure_csrf_cookie
    def view(request):
        # Doesn't insert a token or anything
        return HttpResponse(content="")

    response = view(self._get_GET_no_csrf_cookie_request())
    self.assertTrue(response.cookies.get(settings.CSRF_COOKIE_NAME, False))
    self.assertIn('Cookie', response.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
    """
    The ensure_csrf_cookie decorator fulfils its promise with the
    middleware enabled.
    """
    @ensure_csrf_cookie
    def view(request):
        # Doesn't insert a token or anything
        return HttpResponse(content="")

    request = self._get_GET_no_csrf_cookie_request()
    CsrfViewMiddleware().process_view(request, view, (), {})
    response = CsrfViewMiddleware().process_response(request, view(request))
    self.assertTrue(response.cookies.get(settings.CSRF_COOKIE_NAME, False))
    self.assertIn('Cookie', response.get('Vary', ''))
def test_ensures_csrf_cookie_no_logging(self):
    """ensure_csrf_cookie doesn't log warnings. See #19436."""
    @ensure_csrf_cookie
    def view(request):
        # Doesn't insert a token or anything
        return HttpResponse(content="")

    class FailOnEmitHandler(logging.Handler):
        # Any record reaching this handler means a warning was logged.
        def emit(self, record):
            raise Exception("This shouldn't have happened!")

    logger = logging.getLogger('django.request')
    failing_handler = FailOnEmitHandler()
    previous_level = logger.level
    try:
        logger.addHandler(failing_handler)
        logger.setLevel(logging.WARNING)
        view(self._get_GET_no_csrf_cookie_request())
    finally:
        # Restore the logger so later tests are unaffected.
        logger.removeHandler(failing_handler)
        logger.setLevel(previous_level)
def test_csrf_cookie_age(self):
    """The CSRF cookie age can be set via settings.CSRF_COOKIE_AGE."""
    request = self._get_GET_no_csrf_cookie_request()
    MAX_AGE = 123
    overrides = dict(
        CSRF_COOKIE_NAME='csrfcookie',
        CSRF_COOKIE_DOMAIN='.example.com',
        CSRF_COOKIE_AGE=MAX_AGE,
        CSRF_COOKIE_PATH='/test/',
        CSRF_COOKIE_SECURE=True,
        CSRF_COOKIE_HTTPONLY=True,
    )
    with self.settings(**overrides):
        # token_view calls get_token() indirectly
        CsrfViewMiddleware().process_view(request, token_view, (), {})
        response = CsrfViewMiddleware().process_response(
            request, token_view(request))
        max_age = response.cookies.get('csrfcookie').get('max-age')
        self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
    """
    When CSRF_COOKIE_AGE is None the cookie carries no max-age and is
    therefore session-based.
    """
    request = self._get_GET_no_csrf_cookie_request()
    MAX_AGE = None
    overrides = dict(
        CSRF_COOKIE_NAME='csrfcookie',
        CSRF_COOKIE_DOMAIN='.example.com',
        CSRF_COOKIE_AGE=MAX_AGE,
        CSRF_COOKIE_PATH='/test/',
        CSRF_COOKIE_SECURE=True,
        CSRF_COOKIE_HTTPONLY=True,
    )
    with self.settings(**overrides):
        # token_view calls get_token() indirectly
        CsrfViewMiddleware().process_view(request, token_view, (), {})
        response = CsrfViewMiddleware().process_response(
            request, token_view(request))
        max_age = response.cookies.get('csrfcookie').get('max-age')
        self.assertEqual(max_age, '')
def test_post_data_read_failure(self):
    """
    #20128 -- IOErrors during POST data reading should be caught and
    treated as if the POST data wasn't there.
    """
    class CsrfPostRequest(HttpRequest):
        """
        HttpRequest that can raise an IOError when accessing POST data
        """
        def __init__(self, token, raise_error):
            super(CsrfPostRequest, self).__init__()
            self.method = 'POST'

            # Keep raise_error disabled while __init__ itself writes to
            # POST below; the POST property getter would otherwise raise.
            self.raise_error = False
            self.COOKIES[settings.CSRF_COOKIE_NAME] = token
            self.POST['csrfmiddlewaretoken'] = token

            # Only now arm the failure mode requested by the caller.
            self.raise_error = raise_error

        def _load_post_and_files(self):
            raise IOError('error reading input data')

        def _get_post(self):
            # Simulate the read failure only when armed; otherwise
            # behave like a normal request.
            if self.raise_error:
                self._load_post_and_files()
            return self._post

        def _set_post(self, post):
            self._post = post

        POST = property(_get_post, _set_post)

    token = 'ABC'

    # With readable POST data and a matching token, the request passes.
    req = CsrfPostRequest(token, raise_error=False)
    resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
    self.assertIsNone(resp)

    # When reading POST raises IOError, the middleware must treat the
    # data as absent and reject the request (403), not crash.
    req = CsrfPostRequest(token, raise_error=True)
    resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
    self.assertEqual(resp.status_code, 403)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.