repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
intel-iot-devkit/upm | examples/python/abp.py | 6 | 2128 | #!/usr/bin/env python
# Author: Abhishek Malik <abhishek.malik@intel.com>
# Copyright (c) 2017 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_abp as abp
def main():
    """Poll a Honeywell ABP pressure sensor once a second, forever."""
    # Honeywell ABP pressure sensor on I2C bus 0, address 0x28
    sensor = abp.ABP(0, 0x28)

    ## Exit handlers ##
    # Raising SystemExit on SIGINT stops python from printing a
    # stacktrace when you hit control-C.
    def SIGINTHandler(signum, frame):
        raise SystemExit

    # Runs at interpreter shutdown (registered via atexit below).
    def exitHandler():
        print("Exiting")
        sys.exit(0)

    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)

    # Read the value every second and report pressure and temperature.
    while True:
        sensor.update()
        print("Pressure: {0}".format(sensor.getPressure()))
        print("Temperature: {0}".format(sensor.getTemperature()))
        time.sleep(1)
# Run the demo loop only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| mit |
teltek/edx-platform | lms/djangoapps/verify_student/management/commands/retry_failed_photo_verifications.py | 21 | 1631 | """
Django admin commands related to verify_student
"""
from django.core.management.base import BaseCommand
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
class Command(BaseCommand):
    """
    Find SoftwareSecurePhotoVerification attempts with a status of
    'must_retry' -- or, if receipt ids are passed on the command line,
    exactly those attempts -- and re-submit them for verification.
    """
    args = "<SoftwareSecurePhotoVerification id, SoftwareSecurePhotoVerification id, ...>"
    help = (
        "Retries SoftwareSecurePhotoVerifications passed as "
        "arguments, or if no arguments are supplied, all that "
        "are in a state of 'must_retry'"
    )

    def handle(self, *args, **options):
        if args:
            # Explicit receipt ids were given: retry exactly those
            # attempts, regardless of their current status.
            attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(
                receipt_id__in=args
            )
            force_must_retry = True
        else:
            attempts_to_retry = SoftwareSecurePhotoVerification.objects.filter(status='must_retry')
            force_must_retry = False

        # print() call form is valid under both Python 2 and Python 3
        # (the original used the py2-only print statement).
        print("Attempting to retry {0} failed PhotoVerification submissions".format(len(attempts_to_retry)))
        for index, attempt in enumerate(attempts_to_retry):
            print("Retrying submission #{0} (ID: {1}, User: {2})".format(index, attempt.id, attempt.user))

            # Set the attempt's status to 'must_retry' so that we can re-submit it
            if force_must_retry:
                attempt.status = 'must_retry'

            attempt.submit(copy_id_photo_from=attempt.copy_id_photo_from)
            print("Retry result: {0}".format(attempt.status))
        print("Done resubmitting failed photo verifications")
| agpl-3.0 |
opps/opps | tests/utils/test_text.py | 4 | 1499 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from opps.utils.text import split_tags
class SplitTags(TestCase):
    """Unit tests for opps.utils.text.split_tags."""

    def test_common(self):
        raw = 'gabriela pugliesi e Ricardo,NOVIDADE,instagram'
        expected = ['gabriela pugliesi e Ricardo', 'NOVIDADE', 'instagram']
        self.assertEqual(split_tags(raw), expected)

    def test_duplicated_comma(self):
        raw = 'entretenimento,,Taylor Swift,The Voice,famosos'
        expected = ['entretenimento', 'Taylor Swift', 'The Voice', 'famosos']
        self.assertEqual(split_tags(raw), expected)

    def test_duplicated_commas_and_extra_spaces(self):
        raw = ',entretenimento,, Taylor Swift, The Voice,famosos , ,'
        expected = ['entretenimento', 'Taylor Swift', 'The Voice', 'famosos']
        self.assertEqual(split_tags(raw), expected)

    def test_another_separator(self):
        template = 'Mariah Carey{0}filhos{0}música{0}entretenimento'
        expected = ['Mariah Carey', 'filhos', 'música', 'entretenimento']
        for sep in ('|', '#'):
            self.assertEqual(split_tags(template.format(sep), separator=sep),
                             expected)
        # Multiple chars
        self.assertEqual(split_tags(template.format('%%'), separator="%%"),
                         expected)

    def test_empty(self):
        # Whitespace-only input yields no tags.
        self.assertEqual(split_tags(' '), [])
        self.assertEqual(split_tags('   '), [])
| mit |
asmundg/coherence | coherence/backends/picasa_storage.py | 3 | 8160 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <coherence@beebits.net>
import os.path
import time
from twisted.internet import threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.internet import task
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore, BackendItem, Container, LazyContainer, \
AbstractBackendStore
from coherence import log
from urlparse import urlsplit
import gdata.photos.service
import gdata.media
import gdata.geo
class PicasaProxy(ReverseProxyUriResource):
    """Reverse proxy for a single Picasa resource URL.

    Strips the ``referer`` header before forwarding the request,
    since requests carrying a foreign referer are rejected upstream.
    """

    def __init__(self, uri):
        ReverseProxyUriResource.__init__(self, uri)

    def render(self, request):
        # dict.has_key() is deprecated and removed in Python 3; the
        # 'in' operator is the equivalent, portable membership test.
        if 'referer' in request.received_headers:
            del request.received_headers['referer']
        return ReverseProxyUriResource.render(self, request)
class PicasaPhotoItem(BackendItem):
    """Coherence BackendItem wrapping one Picasa photo feed entry."""

    def __init__(self, photo):
        self.photo = photo

        # Prefer the photo's summary as display name, fall back to its title.
        self.name = photo.summary.text
        if self.name is None:
            self.name = photo.title.text

        self.duration = None
        self.size = None
        self.mimetype = photo.content.type
        self.description = photo.summary.text
        self.date = None
        self.item = None

        self.photo_url = photo.content.src
        self.thumbnail_url = photo.media.thumbnail[0].url

        self.url = None
        self.location = PicasaProxy(self.photo_url)

    def replace_by(self, item):
        """Replace this item's payload with *item*'s photo entry."""
        # BUG FIX: the original read the undefined local name 'photo'
        # (NameError at runtime); all fields must come from the newly
        # assigned self.photo.
        self.photo = item.photo
        self.name = self.photo.summary.text
        if self.name is None:
            self.name = self.photo.title.text
        self.mimetype = self.photo.content.type
        self.description = self.photo.summary.text
        self.photo_url = self.photo.content.src
        self.thumbnail_url = self.photo.media.thumbnail[0].url
        self.location = PicasaProxy(self.photo_url)
        return True

    def get_item(self):
        # Build the DIDL-Lite representation lazily and cache it.
        if self.item is None:
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            self.item = DIDLLite.Photo(upnp_id, upnp_parent_id, self.name)
            res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
            self.item.res.append(res)
            self.item.childCount = 0
        return self.item

    def get_path(self):
        return self.url

    def get_id(self):
        # storage_id is assigned by the backend store when the item is added.
        return self.storage_id
class PicasaStore(AbstractBackendStore):
    """MediaServer backend exposing a user's Picasa Web Albums.

    Publishes two top-level containers: the user's own albums and the
    service's featured photos, both refreshed lazily.
    """

    logCategory = 'picasa_store'

    implements = ['MediaServer']

    description = ('Picasa Web Albums', 'connects to the Picasa Web Albums service and exposes the featured photos and albums for a given user.', None)

    options = [{'option':'name', 'text':'Server Name:', 'type':'string','default':'my media','help': 'the name under this MediaServer shall show up with on other UPnP clients'},
       {'option':'version','text':'UPnP Version:','type':'int','default':2,'enum': (2,1),'help': 'the highest UPnP version this MediaServer shall support','level':'advance'},
       {'option':'uuid','text':'UUID Identifier:','type':'string','help':'the unique (UPnP) identifier for this MediaServer, usually automatically set','level':'advance'},
       {'option':'refresh','text':'Refresh period','type':'string'},
       {'option':'login','text':'User ID:','type':'string','group':'User Account'},
       {'option':'password','text':'Password:','type':'string','group':'User Account'},
    ]

    def __init__(self, server, **kwargs):
        AbstractBackendStore.__init__(self, server, **kwargs)

        self.name = kwargs.get('name', 'Picasa Web Albums')
        # 'refresh' option is given in minutes; stored in seconds.
        self.refresh = int(kwargs.get('refresh', 60)) * 60

        # accept either 'userid' or the documented 'login' option
        self.login = kwargs.get('userid', kwargs.get('login', ''))
        self.password = kwargs.get('password', '')

        rootContainer = Container(None, self.name)
        self.set_root_item(rootContainer)

        self.AlbumsContainer = LazyContainer(rootContainer, 'My Albums', None, self.refresh, self.retrieveAlbums)
        rootContainer.add_child(self.AlbumsContainer)

        self.FeaturedContainer = LazyContainer(rootContainer, 'Featured photos', None, self.refresh, self.retrieveFeaturedPhotos)
        rootContainer.add_child(self.FeaturedContainer)

        self.init_completed()

    def __repr__(self):
        return self.__class__.__name__

    def upnp_init(self):
        self.current_connection_id = None
        if self.server:
            self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
                'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
                'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
                'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
                'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
                'http-get:*:image/jpeg:*,'
                'http-get:*:image/gif:*,'
                'http-get:*:image/png:*',
                default=True)
        self.wmc_mapping = {'16': self.get_root_id()}

        self.gd_client = gdata.photos.service.PhotosService()
        self.gd_client.email = self.login
        self.gd_client.password = self.password
        self.gd_client.source = 'Coherence UPnP backend'
        if len(self.login) > 0:
            # fire-and-forget login; anonymous access still allows the
            # featured-photos feed
            d = threads.deferToThread(self.gd_client.ProgrammaticLogin)

    def retrieveAlbums(self, parent=None):
        """Fetch the user's album feed and add one LazyContainer per album."""
        albums = threads.deferToThread(self.gd_client.GetUserFeed)

        def gotAlbums(albums):
            if albums is None:
                # print() call form is py2/py3-compatible (original used
                # py2-only print statements)
                print("Unable to retrieve albums")
                return
            for album in albums.entry:
                title = album.title.text
                album_id = album.gphoto_id.text
                item = LazyContainer(parent, title, album_id, self.refresh, self.retrieveAlbumPhotos, album_id=album_id)
                parent.add_child(item, external_id=album_id)

        def gotError(error):
            print("ERROR: %s" % error)

        albums.addCallbacks(gotAlbums, gotError)
        return albums

    def retrieveFeedPhotos(self, parent=None, feed_uri=''):
        """Fetch a photo feed and add a PicasaPhotoItem per entry."""
        photos = threads.deferToThread(self.gd_client.GetFeed, feed_uri)

        def gotPhotos(photos):
            if photos is None:
                print("Unable to retrieve photos for feed %s" % feed_uri)
                return
            for photo in photos.entry:
                photo_id = photo.gphoto_id.text
                item = PicasaPhotoItem(photo)
                item.parent = parent
                parent.add_child(item, external_id=photo_id)

        def gotError(error):
            print("ERROR: %s" % error)

        photos.addCallbacks(gotPhotos, gotError)
        return photos

    def retrieveAlbumPhotos(self, parent=None, album_id=''):
        album_feed_uri = '/data/feed/api/user/%s/albumid/%s?kind=photo' % (self.login, album_id)
        return self.retrieveFeedPhotos(parent, album_feed_uri)

    def retrieveFeaturedPhotos(self, parent=None):
        feed_uri = 'http://picasaweb.google.com/data/feed/api/featured'
        return self.retrieveFeedPhotos(parent, feed_uri)
| mit |
olkku/tf-info | fabfile.py | 1 | 1686 | from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
# Fabric deployment configuration.
env.project_name = 'info-reborn'
# system user the deployed files belong to (the webserver's user)
env.www_user = 'www-data'
env.project_dir = '/var/www/%s/'%env.project_name
env.sites_available = '/etc/apache2/sites-available/'
def check_uncommitted_changes():
    """Abort the deployment if the git working tree is dirty, unless the
    user explicitly confirms to continue."""
    porcelain_lines = local('git status --porcelain | wc -l', capture=True)
    if int(porcelain_lines) > 0 and not confirm("Uncommited changes, continue anyway?"):
        abort("Aborting at user request.")
def test():
    """Run the Django test suite locally; on failure, let the user decide
    whether to abort or push on."""
    with settings(warn_only=True):
        outcome = local('python manage.py test', capture=True)
        if outcome.failed and not confirm("Tests failed, continue anyway?"):
            abort("Aborting at user request.")
def install(commit):
    # Unpack the previously uploaded archive (/tmp/<commit>.tar, see
    # transfer()) into the project directory, update the virtualenv,
    # run Django housekeeping, then enable the site and reload apache.
    with cd(env.project_dir):
        sudo('tar xvf /tmp/%s.tar'%commit, user=env.www_user)
        sudo('rm /tmp/%s.tar'%commit)
        # run the remaining commands inside the project's virtualenv
        with prefix('source bin/activate'):
            sudo('pip install -r requirements.txt', user=env.www_user)
            sudo('python manage.py collectstatic --noinput', user=env.www_user)
            sudo('python manage.py migrate', user=env.www_user)
            sudo('python manage.py loaddata fixtures', user=env.www_user)
    sudo('a2ensite %s'%env.project_name)
    # configtest before graceful so a broken config fails loudly first
    sudo('apache2ctl configtest')
    sudo('apache2ctl graceful')
def transfer(commit):
    # Archive the given commit/ref, upload it to the server's /tmp,
    # and remove the local copy. install() consumes the uploaded tar.
    local('git archive %s > %s.tar'%(commit, commit))
    put('%s.tar'%commit, '/tmp/')
    local('rm %s.tar'%commit)
def deploy(commit=None):
    """Full deployment pipeline: sanity-check, test, upload, install.

    :param commit: git commit/ref to deploy; defaults to the currently
        checked-out branch.
    """
    # 'is None' is the correct identity test (the original used '== None').
    if commit is None:
        commit = local('git rev-parse --abbrev-ref HEAD', capture=True)
        puts('Deploying current branch: %s'%commit)
    check_uncommitted_changes()
    test()
    transfer(commit)
    install(commit)
| bsd-3-clause |
jaggu303619/asylum-v2.0 | openerp/addons/lunch/__openerp__.py | 53 | 2541 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Lunch Orders',
    'author': 'OpenERP SA',
    'version': '0.2',
    'depends': ['base'],
    'category' : 'Tools',
    'summary': 'Lunch Order, Meal, Food',
    'description': """
The base module to manage lunch.
================================
Many companies order sandwiches, pizzas and other, from usual suppliers, for their employees to offer them more facilities.
However lunches management within the company requires proper administration especially when the number of employees or suppliers is important.
The “Lunch Order” module has been developed to make this management easier but also to offer employees more tools and usability.
In addition to a full meal and supplier management, this module offers the possibility to display warning and provides quick order selection based on employee’s preferences.
If you want to save your employees' time and avoid them to always have coins in their pockets, this module is essential.
    """,
    'data': ['security/lunch_security.xml','lunch_view.xml','wizard/lunch_order_view.xml','wizard/lunch_validation_view.xml','wizard/lunch_cancel_view.xml','lunch_report.xml',
        'report/report_lunch_order_view.xml',
        'security/ir.model.access.csv',],
    'css':['static/src/css/lunch.css'],
    # BUG FIX: the manifest previously declared 'images' twice; the later
    # empty list silently overrode this populated one. Keep only this entry.
    'images': ['images/new_order.jpeg','images/lunch_account.jpeg','images/order_by_supplier_analysis.jpeg','images/alert.jpeg'],
    'demo': ['lunch_demo.xml',],
    'installable': True,
    'application' : True,
    'certificate' : '001292377792581874189',
}
| agpl-3.0 |
smiller171/ansible | lib/ansible/plugins/connection/jail.py | 15 | 7802 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
BUFSIZE = 65536
class Connection(ConnectionBase):
    ''' Local BSD Jail based connections '''

    transport = 'jail'
    # Pipelining may work. Someone needs to test by setting this to True and
    # having pipelining=True in their ansible.cfg
    has_pipelining = False
    # Some become_methods may work in v2 (sudo works for other chroot-based
    # plugins while su seems to be failing). If some work, check chroot.py to
    # see how to disable just some methods.
    become_methods = frozenset()

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        self.jail = self._play_context.remote_addr

        # jexec requires root privileges
        if os.geteuid() != 0:
            raise AnsibleError("jail connection requires running as root")

        self.jls_cmd = self._search_executable('jls')
        self.jexec_cmd = self._search_executable('jexec')

        # 'x not in y' is the idiomatic (and correctly-bound) form of
        # the original 'not x in y'
        if self.jail not in self.list_jails():
            raise AnsibleError("incorrect jail name %s" % self.jail)

    @staticmethod
    def _search_executable(executable):
        """Return the absolute path of *executable* or raise AnsibleError."""
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            # BUG FIX: the original applied '%' to the raised exception
            # object ("raise AnsibleError(...) % executable"), which raised
            # a TypeError instead of the intended message.
            raise AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def list_jails(self):
        """Return the names of all running jails (as reported by jls)."""
        p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return stdout.split()

    def get_jail_path(self):
        """Return the filesystem root of the jail."""
        p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # remove \n
        return stdout[:-1]

    def _connect(self):
        ''' connect to the jail; nothing to do here '''
        super(Connection, self)._connect()
        if not self._connected:
            self._display.vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
            self._connected = True

    def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
        ''' run a command on the jail.  This is only needed for implementing
        put_file() get_file() so that we don't have to read the whole file
        into memory.

        compared to exec_command() it looses some niceties like being able to
        return the process's exit code immediately.
        '''
        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
        local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]

        self._display.vvv("EXEC %s" % (local_cmd), host=self.jail)
        p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        return p

    def exec_command(self, cmd, in_data=None, sudoable=False):
        ''' run a command on the jail '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # TODO: Check whether we can send the command to stdin via
        # p.communicate(in_data)
        # If we can, then we can change this plugin to has_pipelining=True and
        # remove the error if in_data is given.
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

        p = self._buffered_exec_command(cmd)

        stdout, stderr = p.communicate(in_data)
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path):
        ''' Make sure that we put files into a standard path

            If a path is relative, then we need to choose where to put it.
            ssh chooses $HOME but we aren't guaranteed that a home dir will
            exist in any given chroot.  So for now we're choosing "/" instead.
            This also happens to be the former default.

            Can revisit using $HOME instead if it's a problem
        '''
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to jail '''
        super(Connection, self).put_file(in_path, out_path)
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)

        out_path = pipes.quote(self._prefix_login_path(out_path))
        try:
            with open(in_path, 'rb') as in_file:
                try:
                    p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
                except OSError:
                    raise AnsibleError("jail connection requires dd command in the jail")
                try:
                    stdout, stderr = p.communicate()
                # a bare 'except:' would also swallow SystemExit and
                # KeyboardInterrupt; Exception is broad enough here
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
                if p.returncode != 0:
                    raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
        except IOError:
            raise AnsibleError("file or module does not exist at: %s" % in_path)

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from jail to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)

        in_path = pipes.quote(self._prefix_login_path(in_path))
        try:
            p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
        except OSError:
            raise AnsibleError("jail connection requires dd command in the jail")

        with open(out_path, 'wb+') as out_file:
            try:
                chunk = p.stdout.read(BUFSIZE)
                while chunk:
                    out_file.write(chunk)
                    chunk = p.stdout.read(BUFSIZE)
            except Exception:
                traceback.print_exc()
                raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))

    def close(self):
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
| gpl-3.0 |
ilope236/ptavi-p3 | smallsmilhandler.py | 1 | 1139 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class SmallSMILHandler(ContentHandler):
    """SAX ContentHandler that records the SMIL elements of interest.

    For each recognized element a dict is appended holding the element
    name plus the values of its known attributes (missing attributes
    default to the empty string).
    """

    def __init__(self):
        self.lista_dic = []
        self.tags = ['root-layout', 'region', 'img', 'audio', 'textstream']
        self.attrs = {
            'root-layout': ['width', 'height', 'background-color'],
            'region': ['id', 'top', 'bottom', 'left', 'right'],
            'img': ['src', 'region', 'begin', 'dur'],
            'audio': ['src', 'begin', 'dur'],
            'textstream': ['src', 'region']
        }

    def startElement(self, name, attrs):
        """Record *name* and its known attributes, if it is a tracked tag."""
        if name not in self.tags:
            return
        entry = {'name': name}
        for attr_name in self.attrs[name]:
            entry[attr_name] = attrs.get(attr_name, "")
        self.lista_dic.append(entry)

    def get_tags(self):
        """Return the list of recorded element dictionaries, in order."""
        return self.lista_dic
if __name__ == "__main__":
parser = make_parser()
sHandler = SmallSMILHandler()
parser.setContentHandler(sHandler)
parser.parse(open('karaoke.smil'))
print sHandler.get_tags()
| gpl-2.0 |
wdzhou/mantid | Framework/PythonInterface/test/python/mantid/api/SampleTest.py | 3 | 2593 | from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.api import Sample
from mantid.simpleapi import CreateWorkspace
from mantid.simpleapi import SetSampleMaterial
from mantid.geometry import CrystalStructure
class SampleTest(unittest.TestCase):
    """Tests for the Sample object exposed through the mantid Python API."""

    def setUp(self):
        # Minimal workspace whose sample() object is exercised below.
        self._ws = CreateWorkspace(DataX=[1,2,3,4,5], DataY=[1,2,3,4,5], OutputWorkspace="dummy")

    # NOTE: assertEquals/assertAlmostEquals are deprecated aliases
    # (removed in Python 3.12); the canonical names are used throughout.
    def test_geometry_getters_and_setters(self):
        sample = self._ws.sample()

        sample.setThickness(12.5)
        self.assertEqual(sample.getThickness(), 12.5)
        sample.setHeight(10.2)
        self.assertEqual(sample.getHeight(), 10.2)
        sample.setWidth(5.9)
        self.assertEqual(sample.getWidth(), 5.9)

    def test_crystal_structure_handling(self):
        sample = self._ws.sample()

        # a fresh sample carries no crystal structure
        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

        cs = CrystalStructure('5.43 5.43 5.43',
                              'F d -3 m',
                              'Si 0 0 0 1.0 0.01')

        sample.setCrystalStructure(cs)

        self.assertEqual(sample.hasCrystalStructure(), True)

        # the structure read back must match what was stored
        cs_from_sample = sample.getCrystalStructure()
        self.assertEqual(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
        self.assertEqual(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
        self.assertEqual(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
        self.assertEqual(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])

        sample.clearCrystalStructure()

        self.assertEqual(sample.hasCrystalStructure(), False)
        self.assertRaises(RuntimeError, sample.getCrystalStructure)

    def test_material(self):
        SetSampleMaterial(self._ws,"Al2 O3",SampleMassDensity=4)
        material = self._ws.sample().getMaterial()

        self.assertAlmostEqual(material.numberDensity, 0.1181, places=4)
        self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)

        atoms, numatoms = material.chemicalFormula()
        self.assertEqual(len(atoms), len(numatoms))
        self.assertEqual(len(atoms), 2)
        self.assertEqual(numatoms[0], 2)
        self.assertEqual(numatoms[1], 3)

        # coherent scattering cross-section is the atom-count-weighted
        # mean of the two constituents (2 Al + 3 O per formula unit)
        xs0 = atoms[0].neutron()
        xs1 = atoms[1].neutron()
        xs = ( xs0['coh_scatt_xs']*2 + xs1['coh_scatt_xs']*3 ) / 5
        self.assertAlmostEqual(material.cohScatterXSection(), xs, places=4)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
mdrobisch/roseguarden | server/app/models.py | 2 | 12143 | __author__ = 'drobisch'
from server import db, flask_bcrypt, orm
from wtforms.validators import Email
import random
import base64
import datetime
import marshmallow
class User(db.Model):
    """SQLAlchemy model for a roseguarden user, including door-access
    policy, RFID card credentials and usage statistics."""

    # How a user's door access is granted/limited (value of accessType).
    ACCESSTYPE_NO_ACCESS = 0
    ACCESSTYPE_DAILY_ACCESS_PERIOD = 1
    ACCESSTYPE_ACCESS_DAYS = 2
    ACCESSTYPE_LIFETIME_ACCESS = 3
    ACCESSTYPE_MONTHLY_BUDGET = 4
    ACCESSTYPE_QUARTERLY_BUDGET = 5
    ACCESSTYPE_ABSOLUT_ACCESS_PERIOD = 6
    ACCESSTYPE_MAX = 7

    # Authentication channel used for an access attempt.
    AUTHTYPE_WEB = 0
    AUTHTYPE_RFID = 1

    # User roles.
    ROLE_USER = 0
    ROLE_ADMIN = 1
    ROLE_SUPERVISOR = 2

    # Weekday bit flags for accessDaysMask (Monday..Sunday).
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 4
    THURSDAY = 8
    FRIDAY = 16
    SATURDAY = 32
    SUNDAY = 64

    id = db.Column(db.Integer, primary_key=True)
    syncMaster = db.Column(db.Integer)
    active = db.Column(db.Integer)
    email = db.Column(db.Text, unique=True, nullable=False, info={'validators': Email()})
    # bcrypt hash, never the plain password (see __init__)
    password = db.Column(db.Text, nullable=False)
    token = db.Column(db.Text)
    tokenExpirationDate = db.Column(db.DateTime)
    firstName = db.Column(db.Text)
    lastName = db.Column(db.Text)
    phone = db.Column(db.Text)
    association = db.Column(db.Text)
    role = db.Column(db.Integer)
    # RFID card identity and MIFARE-style auth material
    cardID = db.Column(db.Text)
    cardAuthSector = db.Column(db.Integer)
    cardAuthBlock = db.Column(db.Integer)
    cardAuthKeyA = db.Column(db.Text)
    cardAuthKeyB = db.Column(db.Text)
    cardSecret = db.Column(db.Text)
    licenseMask = db.Column(db.Integer)
    # rolling weekly/monthly access statistics
    weeklyAccessAverage = db.Column(db.Integer)
    weeklyAccessWeekNumber = db.Column(db.Integer)
    weeklyAccessCount = db.Column(db.Integer)
    monthlyAccessAverage = db.Column(db.Integer)
    monthlyAccessMonthNumber = db.Column(db.Integer)
    monthlyAccessCount = db.Column(db.Integer)
    keyMask = db.Column(db.Integer)
    # access policy fields (interpretation depends on accessType)
    accessType = db.Column(db.Integer)
    accessDateStart = db.Column(db.DateTime)
    accessDateEnd = db.Column(db.DateTime)
    accessTimeStart = db.Column(db.DateTime)
    accessTimeEnd = db.Column(db.DateTime)
    accessDaysMask = db.Column(db.Integer)
    accessDayCounter = db.Column(db.Integer)
    accessDayCyclicBudget = db.Column(db.Integer)
    lastAccessDaysUpdateDate = db.Column(db.DateTime)
    lastLoginDateTime = db.Column(db.DateTime)
    lastSyncDateTime = db.Column(db.DateTime)
    registerDateTime = db.Column(db.DateTime)
    lastAccessDateTime = db.Column(db.DateTime)
    budget = db.Column(db.Float)
    lastBudgetUpdateDate = db.Column(db.DateTime)

    @orm.reconstructor
    def init_on_load(self):
        # Derived, non-persisted flag: whether a card is assigned to the
        # user. Recomputed each time the object is loaded from the DB.
        if self.cardID == "":
            self.cardIDAssigned = 0
        else:
            self.cardIDAssigned = 1

    def updateUserFromSyncDict(self, data):
        """Merge the fields of a sync payload dict into this user.

        Timestamp strings are ISO-8601; `[:19]` truncates any sub-second
        or timezone suffix before parsing. Login/access/budget timestamps
        are only taken over when newer than the local value.
        """
        self.syncMaster = data['syncMaster']
        self.active = data['active']
        self.phone = data['phone']
        self.cardAuthBlock = data['cardAuthBlock']
        self.cardAuthSector = data['cardAuthSector']
        self.cardID = data['cardID']
        self.cardSecret = data['cardSecret']
        self.cardAuthKeyA = data['cardAuthKeyA']
        self.cardAuthKeyB = data['cardAuthKeyB']
        self.role = data['role']
        self.email = data['email']
        self.password = data['password']
        self.firstName = data['firstName']
        self.lastName = data['lastName']
        self.association = data['association']
        self.phone = data['phone']
        self.keyMask = data['keyMask']
        self.licenseMask = data['licenseMask']
        self.accessDaysMask = data['accessDaysMask']
        self.accessType = data['accessType']
        self.accessDayCounter = data['accessDayCounter']
        self.accessDayCyclicBudget = data['accessDayCyclicBudget']
        if self.lastLoginDateTime < datetime.datetime.strptime(data['lastLoginDateTime'][:19], '%Y-%m-%dT%H:%M:%S'):
            self.lastLoginDateTime = datetime.datetime.strptime(data['lastLoginDateTime'][:19], '%Y-%m-%dT%H:%M:%S')
        # NOTE(review): this compares/assigns lastAccessDateTime from the
        # payload's *lastLoginDateTime* field -- looks like it should read
        # data['lastAccessDateTime']; confirm against the sync protocol.
        if self.lastAccessDateTime < datetime.datetime.strptime(data['lastLoginDateTime'][:19], '%Y-%m-%dT%H:%M:%S'):
            self.lastAccessDateTime = datetime.datetime.strptime(data['lastLoginDateTime'][:19], '%Y-%m-%dT%H:%M:%S')
        if self.lastBudgetUpdateDate < datetime.datetime.strptime(data['lastBudgetUpdateDate'][:19], '%Y-%m-%dT%H:%M:%S'):
            self.lastBudgetUpdateDate = datetime.datetime.strptime(data['lastBudgetUpdateDate'][:19], '%Y-%m-%dT%H:%M:%S')
        self.lastAccessDaysUpdateDate = datetime.datetime.strptime(data['lastAccessDaysUpdateDate'][:19], '%Y-%m-%dT%H:%M:%S')
        self.registerDateTime = datetime.datetime.strptime(data['registerDateTime'][:19], '%Y-%m-%dT%H:%M:%S')
        self.accessDateStart = datetime.datetime.strptime(data['accessDateStart'][:19], '%Y-%m-%dT%H:%M:%S')
        self.accessDateEnd = datetime.datetime.strptime(data['accessDateEnd'][:19], '%Y-%m-%dT%H:%M:%S')
        self.accessTimeStart = datetime.datetime.strptime(data['accessTimeStart'][:19], '%Y-%m-%dT%H:%M:%S')
        self.accessTimeEnd = datetime.datetime.strptime(data['accessTimeEnd'][:19], '%Y-%m-%dT%H:%M:%S')
        # NOTE(review): the next line duplicates the previous assignment
        # of accessTimeEnd (redundant, present in the original).
        self.accessTimeEnd = datetime.datetime.strptime(data['accessTimeEnd'][:19], '%Y-%m-%dT%H:%M:%S')
        self.budget = data['budget']

    def __repr__(self):
        return '<User %r>' % self.email

    def __init__(self, email, password, firstName, lastName, role = 0, phone='0', licenseMask =0, keyMask = 0, association = ''):
        """Create a new user with sensible access-policy defaults.

        *password* is the plain text; it is bcrypt-hashed before storage.
        Defaults grant all weekdays (mask 127), a ~15-year access window
        and all-day access times.
        """
        self.syncMaster = 0
        self.active = 1
        self.phone = phone
        self.cardAuthBlock = 1
        self.cardAuthSector = 4
        self.cardID = ''
        self.cardSecret = ''
        self.cardAuthKeyA = ''
        self.cardAuthKeyB = ''
        self.role = role;
        self.email = email
        # only the bcrypt hash is persisted
        self.password = flask_bcrypt.generate_password_hash(password)
        self.firstName = firstName
        self.lastName = lastName
        self.association = association
        self.phone = phone
        self.keyMask = keyMask
        self.licenseMask = licenseMask
        self.accessDaysMask = 127
        self.accessType = 0
        self.accessDayCounter = 10
        self.accessDayCyclicBudget = 10
        self.weeklyAccessAverage = 0
        # isocalendar()[1] is the current ISO week number
        self.weeklyAccessWeekNumber = datetime.datetime.now().isocalendar()[1]
        self.weeklyAccessCount = 0
        self.monthlyAccessAverage = 0
        self.monthlyAccessMonthNumber = datetime.datetime.now().month
        self.monthlyAccessCount = 0
        # date-only values are normalized to midnight
        self.lastAccessDaysUpdateDate = (datetime.datetime.today()).replace(hour=0, minute=0, second=0, microsecond=0)
        self.accessDateStart = (datetime.datetime.today()).replace(hour=0, minute=0, second=0, microsecond=0)
        # access window ends ~15 years from now
        self.accessDateEnd = (datetime.datetime.today() + datetime.timedelta(365*15)).replace(hour=0,minute=0,second=0,microsecond=0)
        self.accessTimeStart = datetime.datetime.today().replace(hour= 0, minute= 1, second=0, microsecond=0)
        self.accessTimeEnd = datetime.datetime.today().replace(hour= 23, minute= 59, second=0, microsecond=0)
        self.lastAccessDateTime = (datetime.datetime.today()).replace(hour=0, minute=0, second=0, microsecond=0)
        self.lastLoginDateTime = datetime.datetime.today()
        self.lastSyncDateTime = datetime.datetime.now()
        self.registerDateTime = datetime.datetime.today()
        self.budget = 0.00;
        self.lastBudgetUpdateDate = (datetime.datetime.today()).replace(hour=0, minute=0, second=0, microsecond=0)
class Setting(db.Model):
    """Key/value configuration entry persisted via SQLAlchemy.

    ``type`` records which SETTINGTYPE_* constant the ``value`` text should
    be interpreted as.  ``WRITEABLE`` looks like a flag bit OR-ed into the
    type elsewhere -- TODO confirm against callers (usage not visible here).
    """
    # Flag bit marking a setting as editable through the UI (presumably).
    WRITEABLE = 0x100
    # Logical type codes for decoding the text ``value`` column.
    SETTINGTYPE_STRING = 1
    SETTINGTYPE_INT = 2
    SETTINGTYPE_FLOAT = 3
    SETTINGTYPE_BOOL = 4
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text)
    value = db.Column(db.Text)  # stored as text regardless of logical type
    type = db.Column(db.Integer)

    def __init__(self, name, value, type):
        """Store textual ``value`` under ``name`` with the given type code."""
        self.name = name
        self.type = type
        self.value = value
class Action(db.Model):
    """Audit-log entry that may additionally request a node action.

    Most rows are pure log lines (``ACTION_LOGONLY``); ``action`` can also
    request an operation such as an opening request, carrying an optional
    ``actionParameter`` and ``rollbackPoint``.  ``synced`` flags whether the
    row has been replicated yet.
    """
    ACTION_LOGONLY = 0
    ACTION_OPENING_REQUEST = 1
    id = db.Column(db.Integer, primary_key=True)
    date = db.Column(db.DateTime)
    nodeName = db.Column(db.Text)
    userName = db.Column(db.Text)
    userMail = db.Column(db.Text)
    authType = db.Column(db.Integer)
    authInfo = db.Column(db.Text)
    logText = db.Column(db.Text)
    logType = db.Column(db.Text)
    logLevel = db.Column(db.Text)
    action = db.Column(db.Integer)
    actionParameter = db.Column(db.Integer)
    rollbackPoint = db.Column(db.Integer)
    synced = db.Column(db.Integer)

    def __init__(self, date, nodeName, userName, userMail, logText, logType,
                 logLevel, authType, authInfo, action=ACTION_LOGONLY,
                 actionParameter=0, rollbackpoint=-1):
        """Create a log entry; new rows always start unsynced (synced == 0)."""
        self.date = date
        self.nodeName = nodeName
        self.userName = userName
        self.userMail = userMail
        self.logType = logType
        self.logLevel = logLevel
        self.logText = logText
        self.authType = authType
        self.authInfo = authInfo
        self.synced = 0  # not yet replicated
        self.action = action
        self.actionParameter = actionParameter
        self.rollbackPoint = rollbackpoint
class StatisticEntry(db.Model):
    """One data point of a statistic chart.

    Rows are grouped by ``statId`` (which Statistic they belong to),
    ``series`` (which line/bar series) and ``binningId``; ``month`` and
    ``year`` locate the point in time.
    """
    id = db.Column(db.Integer, primary_key=True)
    statId = db.Column(db.Integer)
    month = db.Column(db.Integer)
    year = db.Column(db.Integer)
    binningId = db.Column(db.Integer)
    series = db.Column(db.Integer)
    label = db.Column(db.Text)
    value = db.Column(db.Float)

    def __init__(self, statId, label, value, series, month, year, binningId):
        """Store one labelled value for the given statistic/series/bin."""
        self.statId = statId
        self.label = label
        self.value = value
        self.series = series
        self.month = month
        self.year = year
        self.binningId = binningId
class Statistic(db.Model):
    """Metadata describing one chart rendered from StatisticEntry rows.

    ``statType`` selects the chart style (STATTYPE_* constants) and
    ``displayConfig`` carries STATDISPLAY_CONFIG_* flag bits.  Up to eight
    series labels live in the fixed seriesName1..seriesName8 columns.
    """
    STATTYPE_LINE_SERIES = 1
    STATTYPE_BAR_SERIES = 2
    STATTYPE_RADAR_SERIES = 3
    STATTYPE_DOUGHNUT_CLASSES = 5
    STATTYPE_RADAR_CLASSES = 6
    STATTYPE_YEARLY_BAR_SERIES = 8
    # displayConfig flag bits.
    STATDISPLAY_CONFIG_SHOW_DESCRIPTION = 1
    STATDISPLAY_CONFIG_NO_TOTAL = 2
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text)
    displayConfig = db.Column(db.Integer)
    description = db.Column(db.Text)
    statId = db.Column(db.Integer)
    statType = db.Column(db.Integer)
    binningCount = db.Column(db.Integer)
    seriesCount = db.Column(db.Integer)
    seriesName1 = db.Column(db.Text)
    seriesName2 = db.Column(db.Text)
    seriesName3 = db.Column(db.Text)
    seriesName4 = db.Column(db.Text)
    seriesName5 = db.Column(db.Text)
    seriesName6 = db.Column(db.Text)
    seriesName7 = db.Column(db.Text)
    seriesName8 = db.Column(db.Text)

    def __init__(self, name, statId, statType, binningCount=0, seriesCount=0,
                 description='', displayConfig=0, seriesName1='',
                 seriesName2='', seriesName3='', seriesName4='',
                 seriesName5='', seriesName6='', seriesName7='',
                 seriesName8=''):
        """Create chart metadata; unused series labels stay empty strings."""
        self.name = name
        self.displayConfig = displayConfig
        self.description = description
        self.statId = statId
        self.statType = statType
        self.binningCount = binningCount
        self.seriesCount = seriesCount
        # Fill the eight fixed series-name columns in one loop instead of
        # eight copy-pasted assignments.
        series_labels = (seriesName1, seriesName2, seriesName3, seriesName4,
                         seriesName5, seriesName6, seriesName7, seriesName8)
        for index, label in enumerate(series_labels, start=1):
            setattr(self, 'seriesName%d' % index, label)
class Door(db.Model):
    """A door/node known to the system."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text)
    displayName = db.Column(db.Text)
    keyMask = db.Column(db.Integer)  # bitmask matched against a user's keyMask
    address = db.Column(db.Text)
    local = db.Column(db.Integer)
    password = db.Column(db.Text)  # stored base64-encoded, not hashed

    def __init__(self, name, displayName, keyMask, address, local, password=''):
        """Register a door; ``password`` is base64-obfuscated before storage."""
        self.name = name
        self.displayName = displayName
        self.keyMask = keyMask
        self.address = address
        self.local = local
        # NOTE(review): base64.b64encode requires bytes on Python 3; this
        # only works as written on Python 2 (or if callers pass bytes) --
        # confirm the target interpreter.
        self.password = base64.b64encode(password)
class RfidTagInfo(object):
    """Plain value object pairing an RFID tag id with its user information."""

    def __init__(self, tagId, userInfo):
        self.tagId = tagId
        self.userInfo = userInfo
| gpl-3.0 |
rosmo/ansible | test/units/modules/remote_management/lxca/test_lxca_nodes.py | 34 | 4821 | import json
import pytest
from units.compat import mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.modules.remote_management.lxca import lxca_nodes
from ansible.module_utils.remote_management.lxca.common import setup_conn
from ansible.module_utils.remote_management.lxca.common import close_conn
@pytest.fixture(scope='module')
@mock.patch("ansible.module_utils.remote_management.lxca.common.close_conn", autospec=True)
def setup_module(mock_close_conn):
    """Module-scoped fixture stubbing out the LXCA close_conn helper."""
    mock_close_conn.return_value = True
class TestMyModule():
    """Unit tests for the lxca_nodes Ansible module."""

    # Each parametrize entry omits at least one required argument
    # (auth_url / login_user / login_password), so the module must fail.
    @pytest.mark.parametrize('patch_ansible_module',
                             [
                                 {},
                                 {
                                     "auth_url": "https://10.240.14.195",
                                     "login_user": "USERID",
                                 },
                                 {
                                     "auth_url": "https://10.240.14.195",
                                     "login_password": "Password",
                                 },
                                 {
                                     "login_user": "USERID",
                                     "login_password": "Password",
                                 },
                             ],
                             indirect=['patch_ansible_module'])
    @pytest.mark.usefixtures('patch_ansible_module')
    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.execute_module", autospec=True)
    def test_without_required_parameters(self, _setup_conn, _execute_module,
                                         mocker, capfd, setup_module):
        """Failure must occur when required parameters are missing."""
        with pytest.raises(SystemExit):
            _setup_conn.return_value = "Fake connection"
            _execute_module.return_value = "Fake execution"
            lxca_nodes.main()
        # AnsibleModule prints its failure JSON to stdout before exiting.
        out, err = capfd.readouterr()
        results = json.loads(out)
        assert results['failed']
        assert 'missing required arguments' in results['msg']

    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.execute_module", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.AnsibleModule", autospec=True)
    def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
        """main() must construct AnsibleModule with the expected arg spec."""
        expected_arguments_spec = dict(
            login_user=dict(required=True),
            login_password=dict(required=True, no_log=True),
            command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
                                                           'nodes_by_chassis_uuid',
                                                           'nodes_status_managed',
                                                           'nodes_status_unmanaged']),
            auth_url=dict(required=True),
            uuid=dict(default=None),
            chassis=dict(default=None),
        )
        _setup_conn.return_value = "Fake connection"
        _execute_module.return_value = []
        mod_obj = ansible_mod_cls.return_value
        args = {
            "auth_url": "https://10.243.30.195",
            "login_user": "USERID",
            "login_password": "password",
            "command_options": "nodes",
        }
        mod_obj.params = args
        lxca_nodes.main()
        # Verify the spec passed to the (mocked) AnsibleModule constructor.
        assert(mock.call(argument_spec=expected_arguments_spec,
                         supports_check_mode=False) == ansible_mod_cls.call_args)

    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_nodes._nodes_by_uuid",
                autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.AnsibleModule",
                autospec=True)
    def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
        """_nodes_by_uuid is called with the module and params and its
        (empty-list) return value is passed through unchanged."""
        mod_obj = ansible_mod_cls.return_value
        args = {
            "auth_url": "https://10.243.30.195",
            "login_user": "USERID",
            "login_password": "password",
            "uuid": "3C737AA5E31640CE949B10C129A8B01F",
            "command_options": "nodes_by_uuid",
        }
        mod_obj.params = args
        _setup_conn.return_value = "Fake connection"
        empty_nodes_list = []
        _get_nodes.return_value = empty_nodes_list
        ret_nodes = _get_nodes(mod_obj, args)
        assert mock.call(mod_obj, mod_obj.params) == _get_nodes.call_args
        assert _get_nodes.return_value == ret_nodes
| gpl-3.0 |
Zac-HD/home-assistant | tests/components/sensor/test_sleepiq.py | 5 | 1671 | """The tests for SleepIQ sensor platform."""
import unittest
from unittest.mock import MagicMock
import requests_mock
from homeassistant.components.sensor import sleepiq
from tests.components.test_sleepiq import mock_responses
from tests.common import get_test_home_assistant
class TestSleepIQSensorSetup(unittest.TestCase):
    """Tests the SleepIQ Sensor platform."""

    # Class-level list collecting entities handed to the mocked
    # add_devices callback (shared across the class, not per-instance).
    DEVICES = []

    def add_devices(self, devices):
        """Mock add devices."""
        for device in devices:
            self.DEVICES.append(device)

    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()
        self.username = 'foo'
        self.password = 'bar'
        self.config = {
            'username': self.username,
            'password': self.password,
        }

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    @requests_mock.Mocker()
    def test_setup(self, mock):
        """Test for successfully setting up the SleepIQ platform."""
        # Canned HTTP responses for the SleepIQ API endpoints.
        mock_responses(mock)

        sleepiq.setup_platform(self.hass,
                               self.config,
                               self.add_devices,
                               MagicMock())
        # One sensor per bed side is expected from the mocked responses.
        self.assertEqual(2, len(self.DEVICES))

        left_side = self.DEVICES[1]
        self.assertEqual('SleepNumber ILE Test1 SleepNumber', left_side.name)
        self.assertEqual(40, left_side.state)

        right_side = self.DEVICES[0]
        self.assertEqual('SleepNumber ILE Test2 SleepNumber', right_side.name)
        self.assertEqual(80, right_side.state)
| apache-2.0 |
scturtle/GoodTranslate | translate.py | 1 | 2130 | #!/usr/bin/env python
import json
try:
import urllib2 as request
from urllib import quote
except:
from urllib import request
from urllib.parse import quote
from tk import calc_tk
class Translator:
    """Minimal client for Google Translate's unofficial web endpoint."""

    # The service returns pseudo-JSON with empty array slots; these
    # rewrites insert explicit nulls so json.loads() can parse it.
    tran_table = [(',,,,', ',null,null,null,'), (',,,', ',null,null,'),
                  (',,', ',null,'), ('[,', '[null,'), (',]', ',null]')]

    def __init__(self, to_lang, from_lang='auto'):
        self.from_lang = from_lang
        self.to_lang = to_lang

    def translate(self, source):
        """Translate *source*, returning the joined translated sentences."""
        raw = self._get_json(source)
        for pattern, replacement in self.tran_table:
            raw = raw.replace(pattern, replacement)
        parsed = json.loads(raw)
        return ''.join(sentence[0] for sentence in parsed[0])

    def _get_json(self, source):
        """Fetch the raw (pseudo-)JSON translation response for *source*."""
        escaped_source = quote(source, '')
        url = ("http://translate.google.com/translate_a/single?"
               "client=t&ie=UTF-8&oe=UTF-8&dt=t&sl=%s&tl=%s&q=%s&tk=%s"
               ) % (self.from_lang, self.to_lang, escaped_source,
                    calc_tk(source))
        req = request.Request(url=url,
                              headers={'User-Agent': 'Mozilla/5.0'})
        response = request.urlopen(req)
        return response.read().decode('utf-8')
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('texts', metavar='text', nargs='+',
help='a string to translate'
'(use "" when it\'s a sentence)')
parser.add_argument('-t', '--to', dest='to_lang', type=str, default='zh',
help='To language (e.g. zh, zh-TW, en, ja, ko).'
' Default is zh.')
parser.add_argument('-f', '--from', dest='from_lang',
type=str, default='auto',
help='From language (e.g. zh, zh-TW, en, ja, ko).'
' Default is auto.')
args = parser.parse_args()
translator = Translator(from_lang=args.from_lang, to_lang=args.to_lang)
for text in args.texts:
translation = translator.translate(text)
sys.stdout.write(translation)
| unlicense |
boscoh/inmembrane | inmembrane/plugins/tatfind_web.py | 1 | 2816 | # -*- coding: utf-8 -*-
# Citation metadata shown for this plugin; the literals contain non-ASCII
# author names, hence the explicit u"" prefixes.
citation = {'ref': u"Rose, R.W., T. Brüser,. J. C. Kissinger, and M. "
                   u"Pohlschröder. 2002. Adaptation of protein secretion "
                   u"to extremely high salt concentrations by extensive use "
                   u"of the twin arginine translocation pathway. Mol. Microbiol."
                   u"5: 943-950 \n"
                   u"<http://dx.doi.org/10.1046/j.1365-2958.2002.03090.x>",
            'name': "TatFind 1.4"
            }

# When True, raw twill/web output is dumped to stderr for debugging.
__DEBUG__ = False
import sys, os, time, StringIO
import twill
from twill.commands import find, formfile, follow, fv, go, show, \
showforms, showlinks, submit, agent
from inmembrane.helpers import log_stderr, parse_fasta_header
def parse_tatfind_output(output, proteins):
    """Annotate *proteins* with TatFind predictions parsed from *output*.

    *output* is the TatFind HTML result (a file-like object or list of
    strings).  Every "Results for <id>: TRUE|FALSE" line sets the boolean
    'is_tatfind' flag on the matching entry of *proteins*, which is
    returned.
    """
    for line in output:
        if "Results for" not in line:
            continue
        tail = line.split("Results for ")[1]
        raw_id = tail.split(":")[:-1][0]
        # Normalise the identifier to the internal naming scheme.
        seqid, _desc = parse_fasta_header(raw_id)
        # Verdict is the text after the final colon: "TRUE" or "FALSE".
        verdict = tail.split(":")[-1:][0].strip()
        proteins[seqid]["is_tatfind"] = (verdict == "TRUE")
    return proteins
def annotate(params, proteins, \
             url="http://signalfind.org/tatfind.html", force=False):
    """
    Interfaces with the TatFind web service at (http://signalfind.org/tatfind.html)
    to predict if protein sequences contain Twin-Arginine Translocation (Tat)
    signal peptides.

    Results are cached in 'tatfind.out'; pass force=True to resubmit.
    Returns *proteins* annotated via parse_tatfind_output().
    """
    # set the user-agent so web services can block us if they want ... :/
    python_version = sys.version.split()[0]
    agent("Python-urllib/%s (twill; inmembrane)" % python_version)

    outfn = 'tatfind.out'
    log_stderr("# TatFind(web) %s > %s" % (params['fasta'], outfn))

    # Reuse the cached raw output unless the caller forces a fresh run.
    if not force and os.path.isfile(outfn):
        log_stderr("# -> skipped: %s already exists" % outfn)
        fh = open(outfn, 'r')
        proteins = parse_tatfind_output(fh, proteins)
        fh.close()
        return proteins

    # dump extraneous output into this blackhole so we don't see it
    if not __DEBUG__: twill.set_output(StringIO.StringIO())

    # Drive the web form with twill: upload the FASTA file and submit.
    go(url)
    if __DEBUG__: showforms()
    formfile("1", "seqFile", params["fasta"])
    submit()
    if __DEBUG__: show()

    tatfind_output = show()
    if __DEBUG__: log_stderr(tatfind_output)

    # write raw TatFind output to a file (this is the cache read above)
    fh = open(outfn, 'w')
    fh.write(tatfind_output)
    fh.close()

    proteins = parse_tatfind_output(tatfind_output.split("\n"), proteins)
    return proteins
| bsd-2-clause |
saleemjaveds/https-github.com-openstack-nova | nova/tests/virt/xenapi/test_vmops.py | 1 | 44637 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova import objects
from nova.pci import pci_manager
from nova import test
from nova.tests import fake_instance
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Shared fixture: an in-memory fake XenAPI session, a VMOps under
    test, and helpers to create fake VMs cleaned up in tearDown."""

    def setUp(self):
        super(VMOpsTestBase, self).setUp()
        self._setup_mock_vmops()
        self.vms = []  # fake VM refs created by tests; destroyed in tearDown

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Route all XenAPI calls into the in-memory fake session.
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        self._session = xenapi_session.XenAPISession('test_url', 'root',
                                                     'test_pass')
        self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def create_vm(self, name, state="Running"):
        """Create a fake VM record, remember its ref for cleanup, and
        return (record, ref)."""
        vm_ref = xenapi_fake.create_vm(name, state)
        self.vms.append(vm_ref)
        vm = xenapi_fake.get_record("VM", vm_ref)
        return vm, vm_ref

    def tearDown(self):
        super(VMOpsTestBase, self).tearDown()
        for vm in self.vms:
            xenapi_fake.destroy_vm(vm)
class VMOpsTestCase(VMOpsTestBase):
    """VMOps tests using a bare mock session (overriding the fake XenAPI
    session the base class sets up)."""

    def setUp(self):
        super(VMOpsTestCase, self).setUp()
        # NOTE(review): the base setUp already invoked this subclass's
        # _setup_mock_vmops via normal method resolution; this second call
        # rebuilds the mocks -- presumably harmless, confirm.
        self._setup_mock_vmops()

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Override: use a minimal mock session instead of the fake XenAPI.
        self._session = self._get_mock_session(product_brand, product_version)
        self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def _get_mock_session(self, product_brand, product_version):
        # Bare object carrying only the attributes VMOps inspects here.
        class Mock(object):
            pass

        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session

    def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                                  vm_shutdown=True):
        """Drive finish_revert_migration through the crash-recovery paths:
        *backup_made* - the 'foo-orig' backup VM exists;
        *new_made* - the new 'foo' VM was already created;
        *vm_shutdown* - whether the VM needs restarting afterwards."""
        instance = {'name': 'foo',
                    'task_state': task_states.RESIZE_MIGRATING}
        context = 'fake_context'

        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self._vmops, '_destroy')
        self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
        self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
        self.mox.StubOutWithMock(self._vmops, '_start')
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')

        vm_utils.lookup(self._session, 'foo-orig').AndReturn(
            backup_made and 'foo' or None)
        vm_utils.lookup(self._session, 'foo').AndReturn(
            (not backup_made or new_made) and 'foo' or None)

        if backup_made:
            if new_made:
                # A half-created new VM must be destroyed before renaming
                # the backup into place.
                self._vmops._destroy(instance, 'foo')
            vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
            self._vmops._attach_mapped_block_devices(instance, [])

        vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
        if vm_shutdown:
            self._vmops._start(instance, 'foo')

        self.mox.ReplayAll()

        self._vmops.finish_revert_migration(context, instance, [])

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(False, False)

    def test_xsm_sr_check_relaxed_cached(self):
        """The plugin call behind _is_xsm_sr_check_relaxed must be cached
        (exactly one call across repeated queries)."""
        self.make_plugin_call_count = 0

        def fake_make_plugin_call(plugin, method, **args):
            self.make_plugin_call_count = self.make_plugin_call_count + 1
            return "true"

        self.stubs.Set(self._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())

        self.assertEqual(self.make_plugin_call_count, 1)

    def test_get_vm_opaque_ref_raises_instance_not_found(self):
        instance = {"name": "dummy"}
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        # A failed lookup must surface as InstanceNotFound.
        vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceNotFound,
                          self._vmops._get_vm_opaque_ref, instance)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
    """Tests for VMOps._inject_auto_disk_config xenstore writes."""
    # The redundant setUp() override that only called super() was removed;
    # the inherited VMOpsTestBase.setUp already performs all required setup.

    def test_inject_auto_disk_config_when_present(self):
        """auto_disk_config=True must be written to xenstore as 'True'."""
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        xenstore_data = vm['xenstore_data']
        self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')

    def test_inject_auto_disk_config_none_as_false(self):
        """auto_disk_config=None is treated as disabled ('False')."""
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        xenstore_data = vm['xenstore_data']
        self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
    """Tests for console-output retrieval and dom-id lookup."""
    # The redundant setUp() override that only called super() was removed;
    # the inherited VMOpsTestBase.setUp already performs all required setup.

    def test_get_console_output_works(self):
        """Console output is fetched using the instance's dom id."""
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
        self.mox.ReplayAll()

        self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))

    def test_get_console_output_throws_nova_exception(self):
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        # dom_id=0 used to trigger exception in fake XenAPI
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.vmops.get_console_output, instance)

    def test_get_dom_id_works(self):
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))

    def test_get_dom_id_works_with_rescue_vm(self):
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy-rescue")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(instance, check_rescue=True))

    def test_get_dom_id_raises_not_found(self):
        instance = {"name": "dummy"}
        self.create_vm("not-dummy")
        self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)

    def test_get_dom_id_works_with_vmref(self):
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(vm_ref=vm_ref))
class SpawnTestCase(VMOpsTestBase):
def _stub_out_common(self):
self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(vm_utils, 'get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
self.mox.StubOutWithMock(self.vmops._volumeops,
'safe_cleanup_from_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
self.mox.StubOutWithMock(self.vmops, '_destroy')
self.mox.StubOutWithMock(self.vmops, '_attach_disks')
self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
self.mox.StubOutWithMock(self.vmops, '_create_vifs')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'prepare_instance_filter')
self.mox.StubOutWithMock(self.vmops, '_start')
self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
self.mox.StubOutWithMock(self.vmops,
'_configure_new_instance_with_agent')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.mox.StubOutWithMock(self.vmops.firewall_driver,
'apply_instance_filter')
def _test_spawn(self, name_label_param=None, block_device_info_param=None,
rescue=False, include_root_vdi=True, throw_exception=None,
attach_pci_dev=False):
self._stub_out_common()
instance = {"name": "dummy", "uuid": "fake_uuid"}
name_label = name_label_param
if name_label is None:
name_label = "dummy"
image_meta = {"id": "image_id"}
context = "context"
session = self.vmops._session
injected_files = "fake_files"
admin_password = "password"
network_info = "net_info"
steps = 10
if rescue:
steps += 1
block_device_info = block_device_info_param
if block_device_info and not block_device_info['root_device_name']:
block_device_info = dict(block_device_info_param)
block_device_info['root_device_name'] = \
self.vmops.default_root_dev
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
step = 1
self.vmops._update_instance_progress(context, instance, step, steps)
vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
if include_root_vdi:
vdis["root"] = {"ref": "fake_ref"}
vm_utils.get_vdis_for_instance(context, session, instance, name_label,
"image_id", di_type,
block_device_info=block_device_info).AndReturn(vdis)
self.vmops._resize_up_vdis(instance, vdis)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
vm_ref = "fake_vm_ref"
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, rescue, admin_password, injected_files)
if attach_pci_dev:
fake_dev = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '00:00.0',
'vendor_id': '1234',
'product_id': 'abcd',
'dev_type': 'type-PCI',
'status': 'available',
'dev_id': 'devid',
'label': 'label',
'instance_uuid': None,
'extra_info': '{}',
}
pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
vm_utils.set_other_config_pci(self.vmops._session,
vm_ref,
"0/0000:00:00.0")
else:
pci_manager.get_instance_pci_devs(instance).AndReturn([])
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._inject_hostname(instance, vm_ref, rescue)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
if rescue:
self.vmops._attach_orig_disks(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step,
steps)
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self.vmops._remove_hostname(instance, vm_ref)
step += 1
self.vmops._update_instance_progress(context, instance, step, steps)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
step += 1
last_call = self.vmops._update_instance_progress(context, instance,
step, steps)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
self.mox.ReplayAll()
self.vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info,
block_device_info_param, name_label_param, rescue)
def test_spawn(self):
self._test_spawn()
def test_spawn_with_alternate_options(self):
self._test_spawn(include_root_vdi=False, rescue=True,
name_label_param="bob",
block_device_info_param={"root_device_name": ""})
def test_spawn_with_pci_available_on_the_host(self):
self._test_spawn(attach_pci_dev=True)
def test_spawn_performs_rollback_and_throws_exception(self):
self.assertRaises(test.TestingException, self._test_spawn,
throw_exception=test.TestingException())
def _test_finish_migration(self, power_on=True, resize_instance=True,
throw_exception=None):
self._stub_out_common()
self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
context = "context"
migration = {}
name_label = "dummy"
instance = {"name": name_label, "uuid": "fake_uuid"}
disk_info = "disk_info"
network_info = "net_info"
image_meta = {"id": "image_id"}
block_device_info = "bdi"
session = self.vmops._session
self.vmops._ensure_instance_name_unique(name_label)
self.vmops._ensure_enough_free_mem(instance)
di_type = "di_type"
vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
root_vdi = {"ref": "fake_ref"}
ephemeral_vdi = {"ref": "fake_ref_e"}
vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
vm_utils.import_all_migrated_disks(self.vmops._session,
instance).AndReturn(vdis)
kernel_file = "kernel"
ramdisk_file = "ramdisk"
vm_utils.create_kernel_and_ramdisk(context, session,
instance, name_label).AndReturn((kernel_file, ramdisk_file))
vm_ref = "fake_vm_ref"
self.vmops._create_vm_record(context, instance, name_label,
di_type, kernel_file,
ramdisk_file, image_meta).AndReturn(vm_ref)
if resize_instance:
self.vmops._resize_up_vdis(instance, vdis)
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
network_info, False, None, None)
self.vmops._attach_mapped_block_devices(instance, block_device_info)
pci_manager.get_instance_pci_devs(instance).AndReturn([])
self.vmops._inject_instance_metadata(instance, vm_ref)
self.vmops._inject_auto_disk_config(instance, vm_ref)
self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
network_info)
self.vmops.inject_network_info(instance, network_info, vm_ref)
self.vmops._create_vifs(instance, vm_ref, network_info)
self.vmops.firewall_driver.setup_basic_filtering(instance,
network_info).AndRaise(NotImplementedError)
self.vmops.firewall_driver.prepare_instance_filter(instance,
network_info)
if power_on:
self.vmops._start(instance, vm_ref)
self.vmops._wait_for_instance_to_start(instance, vm_ref)
self.vmops.firewall_driver.apply_instance_filter(instance,
network_info)
last_call = self.vmops._update_instance_progress(context, instance,
step=5, total_steps=5)
if throw_exception:
last_call.AndRaise(throw_exception)
self.vmops._destroy(instance, vm_ref, network_info=network_info)
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session,
["fake_ref_e", "fake_ref"])
self.mox.ReplayAll()
self.vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
def test_finish_migration(self):
self._test_finish_migration()
def test_finish_migration_no_power_on(self):
self._test_finish_migration(power_on=False, resize_instance=False)
def test_finish_migrate_performs_rollback_on_error(self):
self.assertRaises(test.TestingException, self._test_finish_migration,
power_on=False, resize_instance=False,
throw_exception=test.TestingException())
def test_remove_hostname(self):
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
self.mox.StubOutWithMock(self._session, 'call_xenapi')
self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
"vm-data/hostname")
self.mox.ReplayAll()
self.vmops._remove_hostname(instance, vm_ref)
self.mox.VerifyAll()
def test_reset_network(self):
class mock_agent(object):
def __init__(self):
self.called = False
def resetnetwork(self):
self.called = True
vm, vm_ref = self.create_vm("dummy")
instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
agent = mock_agent()
self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
self.mox.StubOutWithMock(self.vmops, '_get_agent')
self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
self.vmops.agent_enabled(instance).AndReturn(True)
self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
self.vmops._inject_hostname(instance, vm_ref, False)
self.vmops._remove_hostname(instance, vm_ref)
self.mox.ReplayAll()
self.vmops.reset_network(instance)
self.assertTrue(agent.called)
self.mox.VerifyAll()
def test_inject_hostname(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=False)
def test_inject_hostname_with_rescue_prefix(self):
instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
vm_ref = "vm_ref"
self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
'RESCUE-dummy')
self.mox.ReplayAll()
self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_inject_hostname_with_windows_name_truncation(self):
    """For Windows guests the prefixed hostname is truncated to 15 chars
    ('RESCUE-dummydum'), presumably to fit Windows computer-name limits —
    confirm against _inject_hostname."""
    instance = {"hostname": "dummydummydummydummydummy",
                "os_type": "windows", "uuid": "uuid"}
    vm_ref = "vm_ref"
    self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
    self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                      'RESCUE-dummydum')
    self.mox.ReplayAll()

    self.vmops._inject_hostname(instance, vm_ref, rescue=True)
def test_wait_for_instance_to_start(self):
    """_wait_for_instance_to_start polls the power state until RUNNING."""
    instance = {"uuid": "uuid"}
    vm_ref = "vm_ref"
    self.mox.StubOutWithMock(vm_utils, 'get_power_state')
    self.mox.StubOutWithMock(greenthread, 'sleep')
    # First poll sees SHUTDOWN, so the loop sleeps once (0.5s), then the
    # second poll sees RUNNING and the wait returns.
    vm_utils.get_power_state(self._session, vm_ref).AndReturn(
        power_state.SHUTDOWN)
    greenthread.sleep(0.5)
    vm_utils.get_power_state(self._session, vm_ref).AndReturn(
        power_state.RUNNING)
    self.mox.ReplayAll()

    self.vmops._wait_for_instance_to_start(instance, vm_ref)
def test_attach_orig_disks(self):
    """_attach_orig_disks looks up the original VM's root VDI and attaches
    it to the given VM at the rescue device slot, non-bootable."""
    instance = {"name": "dummy"}
    vm_ref = "vm_ref"
    vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}

    self.mox.StubOutWithMock(vm_utils, 'lookup')
    self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
    self.mox.StubOutWithMock(vm_utils, 'create_vbd')

    vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
    # Volumes are excluded: only local disks are considered for attachment.
    self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
        vbd_refs)
    vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
                        vmops.DEVICE_RESCUE, bootable=False)
    self.mox.ReplayAll()

    self.vmops._attach_orig_disks(instance, vm_ref)
def test_agent_update_setup(self):
    """_configure_new_instance_with_agent upgrades the guest agent only
    after networking is configured: resetnetwork is recorded before
    update_if_needed, and mox enforces that order."""
    # agent updates need to occur after networking is configured
    instance = {'name': 'betelgeuse',
                'uuid': '1-2-3-4-5-6'}
    vm_ref = 'vm_ref'
    agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
                                          self.vmops._virtapi, instance,
                                          vm_ref)

    self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
    self.mox.StubOutWithMock(self.vmops, '_get_agent')
    self.mox.StubOutWithMock(agent, 'get_version')
    self.mox.StubOutWithMock(agent, 'resetnetwork')
    self.mox.StubOutWithMock(agent, 'update_if_needed')
    xenapi_agent.should_use_agent(instance).AndReturn(True)
    self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
    agent.get_version().AndReturn('1.2.3')
    agent.resetnetwork()
    agent.update_if_needed('1.2.3')
    self.mox.ReplayAll()

    self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                                                  None, None)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
    """migrate_disk_and_power_off must pick the resize-up or resize-down
    path from the target flavor's disk sizes and refuse ephemeral shrink.

    Class decorators apply bottom-up, so each test receives its mocks as
    (migrate_up, migrate_down, *other mocks).
    """

    def test_migrate_disk_and_power_off_works_down(self,
            migrate_up, migrate_down, *mocks):
        # Root disk shrinks 2G -> 1G: only the resize-down path may run.
        instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
        flavor = {"root_gb": 1, "ephemeral_gb": 0}

        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)

        self.assertFalse(migrate_up.called)
        self.assertTrue(migrate_down.called)

    def test_migrate_disk_and_power_off_works_up(self,
            migrate_up, migrate_down, *mocks):
        # Root and ephemeral both grow: only the resize-up path may run.
        instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
        flavor = {"root_gb": 2, "ephemeral_gb": 2}

        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)

        self.assertFalse(migrate_down.called)
        self.assertTrue(migrate_up.called)

    def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
            migrate_up, migrate_down, *mocks):
        # Shrinking ephemeral storage is unsupported and must raise.
        instance = {"ephemeral_gb": 2}
        flavor = {"ephemeral_gb": 1}

        self.assertRaises(exception.ResizeError,
                          self.vmops.migrate_disk_and_power_off,
                          None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_up.

    Class decorators apply bottom-up, so each test receives mocks as
    (mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
    mock_shutdown, mock_migrate_vhd).
    """

    def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
                                     userdevice, post_snapshot_callback):
        # Stand-in for vm_utils._snapshot_attached_here_impl (a generator
        # used as a context manager): yields the VHD chain of the
        # snapshotted disk.  The root disk (userdevice '0') yields a
        # three-deep chain; ephemerals yield "<userdevice>-leaf"/"-parent".
        self.assertIsInstance(instance, dict)
        if userdevice == '0':
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("fake-snapshot", label)
            yield ["leaf", "parent", "grandp"]
        else:
            leaf = userdevice + "-leaf"
            parent = userdevice + "-parent"
            yield [leaf, parent]

    def test_migrate_disk_resizing_up_works_no_ephemeral(self,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        # No ephemeral VDIs attached to the VM.
        mock_get_all_vdi_uuids.return_value = None

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)

        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)

        # Expected transfer order: parent, grandp, then leaf last
        # (trailing positional values 1, 2, 0 are the per-VHD sequence).
        m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
                                    dest, sr_path, 1),
                          mock.call(self.vmops._session, instance, "grandp",
                                    dest, sr_path, 2),
                          mock.call(self.vmops._session, instance, "leaf",
                                    dest, sr_path, 0)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)

    def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        # Two ephemeral disks at userdevices 4 and 5.
        mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)

        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)

        # All parents (root + ephemerals) stream before any leaf; ephemeral
        # calls carry an extra trailing ephemeral-number argument (1, 2).
        m_vhd_expected = [mock.call(self.vmops._session, instance,
                                    "parent", dest, sr_path, 1),
                          mock.call(self.vmops._session, instance,
                                    "grandp", dest, sr_path, 2),
                          mock.call(self.vmops._session, instance,
                                    "4-parent", dest, sr_path, 1, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-parent", dest, sr_path, 1, 2),
                          mock.call(self.vmops._session, instance,
                                    "leaf", dest, sr_path, 0),
                          mock.call(self.vmops._session, instance,
                                    "4-leaf", dest, sr_path, 0, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-leaf", dest, sr_path, 0, 2)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)

        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)

    @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
    def test_migrate_disk_resizing_up_rollback(self,
            mock_restore,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd):
        context = "ctxt"
        instance = {"name": "fake", "uuid": "fake"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"

        # The first VHD transfer fails; the rollback helper fails too, and
        # the caller must still see InstanceFaultRollback.
        mock_migrate_vhd.side_effect = test.TestingException
        mock_restore.side_effect = test.TestingException

        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.assertRaises(exception.InstanceFaultRollback,
                              self.vmops._migrate_disk_resizing_up,
                              context, instance, dest, vm_ref, sr_path)

        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_restore.assert_called_once_with(instance)
        mock_migrate_vhd.assert_called_once_with(self.vmops._session,
                instance, "parent", dest, sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
    """Tests for VMOps._create_vm_record."""

    @mock.patch.object(vm_utils, 'determine_vm_mode')
    @mock.patch.object(vm_utils, 'get_vm_device_id')
    @mock.patch.object(vm_utils, 'create_vm')
    def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
            mock_get_vm_device_id, mock_determine_vm_mode):
        context = "context"
        instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
        name_label = "dummy"
        disk_image_type = "vhd"
        kernel_file = "kernel"
        ramdisk_file = "ram"
        device_id = "0002"
        image_properties = {"xenapi_device_id": device_id}
        image_meta = {"properties": image_properties}
        session = "session"
        self.vmops._session = session

        mock_get_vm_device_id.return_value = device_id
        mock_determine_vm_mode.return_value = "vm_mode"

        self.vmops._create_vm_record(context, instance, name_label,
            disk_image_type, kernel_file, ramdisk_file, image_meta)

        # The device id extracted from the image properties must be
        # forwarded to create_vm.  NOTE(review): the literal False
        # presumably corresponds to use_pv_kernel — confirm against
        # _create_vm_record's call site.
        mock_get_vm_device_id.assert_called_with(session, image_properties)
        mock_create_vm.assert_called_with(session, instance, name_label,
            kernel_file, ramdisk_file, False, device_id)
class BootableTestCase(VMOpsTestBase):
    """Tests for the bootlock helpers (blocking the 'start' operation)."""

    def setUp(self):
        super(BootableTestCase, self).setUp()

        self.instance = {"name": "test", "uuid": "fake"}
        vm_rec, self.vm_ref = self.create_vm('test')

        # sanity check bootlock is initially disabled:
        self.assertEqual({}, vm_rec['blocked_operations'])

    def _get_blocked(self):
        # Re-fetch the VM record so we observe the current
        # blocked_operations map, not the stale one from setUp.
        vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
        return vm_rec['blocked_operations']

    def test_acquire_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        blocked = self._get_blocked()
        self.assertIn('start', blocked)

    def test_release_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.vmops._release_bootlock(self.vm_ref)
        blocked = self._get_blocked()
        self.assertNotIn('start', blocked)

    def test_set_bootable(self):
        # set_bootable(True) must leave 'start' unblocked.
        self.vmops.set_bootable(self.instance, True)
        blocked = self._get_blocked()
        self.assertNotIn('start', blocked)

    def test_set_not_bootable(self):
        # set_bootable(False) must block 'start'.
        self.vmops.set_bootable(self.instance, False)
        blocked = self._get_blocked()
        self.assertIn('start', blocked)
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
    """Tests for VMOps._resize_up_vdis (growing root and ephemeral VDIs).

    The class decorator injects mock_resize (update_vdi_virtual_size) as
    the last mock argument of every test.
    """

    def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
        # NOTE(review): despite the "dont_resize" name, a non-volume root
        # (osvol=False) *is* resized here — only osvol=True roots are
        # skipped (see the next test).
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertTrue(mock_resize.called)

    def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
        # Volume-backed roots (osvol=True) must never be resized locally.
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': True}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
        # A root entry without a 'ref' (and no osvol flag) is left alone.
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {}}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
                                                      mock_resize):
        # Even with a volume-backed root, existing ephemeral disks are
        # still resized — and never regenerated.
        mock_sizes.return_value = [2000, 1000]
        instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
                'ephemerals': ephemerals}
        with mock.patch.object(vm_utils, 'generate_single_ephemeral',
                               autospec=True) as g:
            self.vmops._resize_up_vdis(instance, vdis)

            self.assertEqual([mock.call(self.vmops._session, instance, 4,
                                        2000),
                              mock.call(self.vmops._session, instance, 5,
                                        1000)],
                             mock_resize.call_args_list)
            self.assertFalse(g.called)

    def test_resize_up_vdis_root(self, mock_resize):
        instance = {"root_gb": 20, "ephemeral_gb": 0}

        self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})

        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            "vdi_ref", 20)

    def test_resize_up_vdis_zero_disks(self, mock_resize):
        # Zero-size flavors trigger no resizes at all.
        instance = {"root_gb": 0, "ephemeral_gb": 0}

        self.vmops._resize_up_vdis(instance, {"root": {}})

        self.assertFalse(mock_resize.called)

    def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
        # Initial spawn passes no vdis at all; nothing should be resized.
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        vdis = {}

        self.vmops._resize_up_vdis(instance, vdis)

        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
        # Both ephemeral devices (4 and 5) exist, so both get resized to
        # the sizes reported by get_ephemeral_disk_sizes.
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        expected = [mock.call(self.vmops._session, instance, 4, 2000),
                    mock.call(self.vmops._session, instance, 5, 1000)]
        self.assertEqual(expected, mock_resize.call_args_list)

    @mock.patch.object(vm_utils, 'generate_single_ephemeral')
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
                                                    mock_generate,
                                                    mock_resize):
        # Only userdevice 4 exists: it is resized, while the missing
        # device 5 must be generated instead.
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
        ephemerals = {"4": {"ref": 4}}
        vdis = {"ephemerals": ephemerals}

        self.vmops._resize_up_vdis(instance, vdis)

        mock_sizes.assert_called_once_with(3000)
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            4, 2000)
        mock_generate.assert_called_once_with(self.vmops._session, instance,
                                              None, 5, 1000)
@mock.patch.object(vm_utils, 'remove_old_snapshots')
class CleanupFailedSnapshotTestCase(VMOpsTestBase):
    """Tests for VMOps.post_interrupted_snapshot_cleanup."""

    def test_post_interrupted_snapshot_cleanup(self, mock_remove):
        # Resolve the instance to a VM ref, then expect old snapshots of
        # that VM to be removed.
        self.vmops._get_vm_opaque_ref = mock.Mock(return_value="vm_ref")

        self.vmops.post_interrupted_snapshot_cleanup("context", "instance")

        mock_remove.assert_called_once_with(self.vmops._session,
                                            "instance", "vm_ref")
class LiveMigrateHelperTestCase(VMOpsTestBase):
    """Tests for the live-migration volume helper on VMOps."""

    def test_connect_block_device_volumes_none(self):
        # No block-device info yields an empty SR mapping.
        result = self.vmops.connect_block_device_volumes(None)
        self.assertEqual({}, result)

    @mock.patch.object(volumeops.VolumeOps, "connect_volume")
    def test_connect_block_device_volumes_calls_connect(self, mock_connect):
        mock_connect.return_value = ("sr_uuid", None)
        block_device_info = {
            "block_device_mapping": [{"connection_info": "c_info"}],
        }
        with mock.patch.object(self.vmops._session,
                               "call_xenapi") as mock_call_xenapi:
            mock_call_xenapi.return_value = "sr_ref"

            result = self.vmops.connect_block_device_volumes(
                block_device_info)

            # Each mapping's connection info is connected, and the
            # resulting SR uuid is resolved to its opaque ref.
            self.assertEqual({'sr_uuid': 'sr_ref'}, result)
            mock_connect.assert_called_once_with("c_info")
            mock_call_xenapi.assert_called_once_with("SR.get_by_uuid",
                                                     "sr_uuid")
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_down.

    Class decorators apply bottom-up, so the mocks arrive in the order
    listed in the test signature below.
    """

    def test_migrate_disk_resizing_down_works_no_ephemeral(
            self,
            mock_destroy_vdi,
            mock_migrate_vhd,
            mock_resize_disk,
            mock_get_vdi_for_vm_safely,
            mock_update_instance_progress,
            mock_apply_orig_vm_name_label,
            mock_resize_ensure_vm_is_shutdown):
        context = "ctx"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        instance_type = dict(root_gb=1)
        old_vdi_ref = "old_ref"
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"

        mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
        mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)

        self.vmops._migrate_disk_resizing_down(context, instance, dest,
                                               instance_type, vm_ref,
                                               sr_path)

        # Expected pipeline: fetch root VDI -> ensure shutdown -> rename
        # orig VM -> shrink into a new VDI -> transfer it -> destroy the
        # temporary shrunk copy.
        mock_get_vdi_for_vm_safely.assert_called_once_with(
            self.vmops._session,
            vm_ref)
        mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
            instance, vm_ref)
        mock_apply_orig_vm_name_label.assert_called_once_with(
            instance, vm_ref)
        mock_resize_disk.assert_called_once_with(
            self.vmops._session,
            instance,
            old_vdi_ref,
            instance_type)
        mock_migrate_vhd.assert_called_once_with(
            self.vmops._session,
            instance,
            new_vdi_uuid,
            dest,
            sr_path, 0)
        mock_destroy_vdi.assert_called_once_with(
            self.vmops._session,
            new_vdi_ref)

        # Four of five progress steps happen here; the fifth is reported
        # by finish_migration on the destination host.
        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected,
                         mock_update_instance_progress.call_args_list)
| apache-2.0 |
rimbalinux/MSISDNArea | docutils/parsers/rst/languages/he.py | 2 | 3567 | # Author: Meir Kriheli
# Id: $Id: he.py 4837 2006-12-26 09:59:41Z sfcben $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'\u05ea\u05e9\u05d5\u05de\u05ea \u05dc\u05d1': 'attention',
u'\u05d6\u05d4\u05d9\u05e8\u05d5\u05ea': 'caution',
u'\u05e1\u05db\u05e0\u05d4': 'danger',
u'\u05e9\u05d2\u05d9\u05d0\u05d4' : 'error',
u'\u05e8\u05de\u05d6': 'hint',
u'\u05d7\u05e9\u05d5\u05d1': 'important',
u'\u05d4\u05e2\u05e8\u05d4': 'note',
u'\u05d8\u05d9\u05e4': 'tip',
u'\u05d0\u05d6\u05d4\u05e8\u05d4': 'warning',
'admonition': 'admonition',
'sidebar': 'sidebar',
'topic': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
'rubric': 'rubric',
'epigraph': 'epigraph',
'highlights': 'highlights',
'pull-quote': 'pull-quote',
'compound': 'compound',
'container': 'container',
#'questions': 'questions',
'table': 'table',
'csv-table': 'csv-table',
'list-table': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
#'imagemap': 'imagemap',
u'\u05ea\u05de\u05d5\u05e0\u05d4': 'image',
'figure': 'figure',
'include': 'include',
'raw': 'raw',
'replace': 'replace',
'unicode': 'unicode',
'date': 'date',
u'\u05e1\u05d2\u05e0\u05d5\u05df': 'class',
'role': 'role',
'default-role': 'default-role',
'title': 'title',
u'\u05ea\u05d5\u05db\u05df': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'header': 'header',
'footer': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'abbreviation': 'abbreviation',
'ab': 'abbreviation',
'acronym': 'acronym',
'ac': 'acronym',
'index': 'index',
'i': 'index',
u'\u05ea\u05d7\u05ea\u05d9': 'subscript',
'sub': 'subscript',
u'\u05e2\u05d9\u05dc\u05d9': 'superscript',
'sup': 'superscript',
'title-reference': 'title-reference',
'title': 'title-reference',
't': 'title-reference',
'pep-reference': 'pep-reference',
'pep': 'pep-reference',
'rfc-reference': 'rfc-reference',
'rfc': 'rfc-reference',
'emphasis': 'emphasis',
'strong': 'strong',
'literal': 'literal',
'named-reference': 'named-reference',
'anonymous-reference': 'anonymous-reference',
'footnote-reference': 'footnote-reference',
'citation-reference': 'citation-reference',
'substitution-reference': 'substitution-reference',
'target': 'target',
'uri-reference': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'raw': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/python/debug/lib/grpc_large_data_test.py | 15 | 8124 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sending large-size data through tfdbg grpc channels.
"Large-size data" includes large GraphDef protos and large Tensor protos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
    """Checks that tfdbg's grpc channel handles oversized graphs/tensors.

    gRPC messages are limited to 4 MB by default, so these tests exercise
    the debug server/client with GraphDefs and Tensor protos above (and at
    the edges of) that size, plus empty tensors.
    """

    @classmethod
    def setUpClass(cls):
        # A single in-process debug server is shared by all tests in this
        # class; captured tensors are kept in memory
        # (dump_to_filesystem=False), not written to disk.
        (cls.debug_server_port, cls.debug_server_url, _,
         cls.debug_server_thread, cls.debug_server
        ) = grpc_debug_test_server.start_server_on_separate_thread(
            dump_to_filesystem=False)
        tf_logging.info("debug server url: %s", cls.debug_server_url)

    @classmethod
    def tearDownClass(cls):
        cls.debug_server.stop_server().wait()
        cls.debug_server_thread.join()

    def tearDown(self):
        ops.reset_default_graph()
        # Drop tensors captured by the previous test so each test only
        # ever observes its own data.
        self.debug_server.clear_data()

    @staticmethod
    def _make_watch_fn(node_name_regex):
        """Build a watch_fn watching DebugIdentity on nodes matching regex.

        Factored out because every test below watched identically and only
        the node-name whitelist differed.
        """
        def watch_fn(fetches, feeds):
            del fetches, feeds  # Unused by this watch_fn.
            return framework.WatchOptions(
                debug_ops=["DebugIdentity"],
                node_name_regex_whitelist=node_name_regex)
        return watch_fn

    def testSendingLargeGraphDefsWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            u = variables.VariableV1(42.0, name="original_u")
            # 50k chained identity ops make the serialized GraphDef > 4 MB
            # (asserted below).
            for _ in xrange(50 * 1000):
                u = array_ops.identity(u)
            sess.run(variables.global_variables_initializer())

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"original_u"))
            self.assertAllClose(42.0, sess.run(u))

            self.assertAllClose(
                [42.0],
                self.debug_server.debug_tensor_values[
                    "original_u:0:DebugIdentity"])
            # With a GPU present the graph is partitioned across two
            # devices, hence two partition GraphDefs.
            self.assertEqual(2 if test.is_gpu_available() else 1,
                             len(self.debug_server.partition_graph_defs))
            max_graph_def_size = max(
                len(graph_def.SerializeToString())
                for graph_def in self.debug_server.partition_graph_defs)
            self.assertGreater(max_graph_def_size, 4 * 1024 * 1024)

    def testSendingLargeFloatTensorWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            u_init_val_array = list(xrange(1200 * 1024))
            # Size: 4 * 1200 * 1024 = 4800k > 4M

            u_init = constant_op.constant(
                u_init_val_array, dtype=dtypes.float32, name="u_init")
            u = variables.VariableV1(u_init, name="u")

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"u_init"))
            sess.run(u.initializer)

            self.assertAllEqual(
                u_init_val_array,
                self.debug_server.debug_tensor_values[
                    "u_init:0:DebugIdentity"][0])

    def testSendingStringTensorWithAlmostTooLargeStringsWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            # Two ~2.5 MB strings: each element is just under the 4 MB
            # message cap, but the whole tensor is well over it.
            u_init_val = [
                b"", b"spam", b"A" * 2500 * 1024, b"B" * 2500 * 1024,
                b"egg", b""]
            u_init = constant_op.constant(
                u_init_val, dtype=dtypes.string, name="u_init")
            u = variables.VariableV1(u_init, name="u")

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"u_init"))
            sess.run(u.initializer)

            self.assertAllEqual(
                u_init_val,
                self.debug_server.debug_tensor_values[
                    "u_init:0:DebugIdentity"][0])

    def testSendingLargeStringTensorWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            # Accumulate random-length strings until the total payload
            # exceeds ~5 MB; the test only checks the echoed values, so
            # the randomness does not affect the assertion.
            strs_total_size_threshold = 5000 * 1024
            cum_size = 0
            u_init_val_array = []
            while cum_size < strs_total_size_threshold:
                strlen = np.random.randint(200)
                u_init_val_array.append(b"A" * strlen)
                cum_size += strlen

            u_init = constant_op.constant(
                u_init_val_array, dtype=dtypes.string, name="u_init")
            u = variables.VariableV1(u_init, name="u")

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"u_init"))
            sess.run(u.initializer)

            self.assertAllEqual(
                u_init_val_array,
                self.debug_server.debug_tensor_values[
                    "u_init:0:DebugIdentity"][0])

    def testSendingEmptyFloatTensorWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            u_init = constant_op.constant(
                [], dtype=dtypes.float32, shape=[0], name="u_init")
            u = variables.VariableV1(u_init, name="u")

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"u_init"))
            sess.run(u.initializer)

            u_init_value = self.debug_server.debug_tensor_values[
                "u_init:0:DebugIdentity"][0]
            self.assertEqual(np.float32, u_init_value.dtype)
            self.assertEqual(0, len(u_init_value))

    def testSendingEmptyStringTensorWorks(self):
        with self.session(
            use_gpu=True,
            config=session_debug_testlib.no_rewrite_session_config()) as sess:
            u_init = constant_op.constant(
                [], dtype=dtypes.string, shape=[0], name="u_init")
            u = variables.VariableV1(u_init, name="u")

            sess = grpc_wrapper.GrpcDebugWrapperSession(
                sess, "localhost:%d" % self.debug_server_port,
                watch_fn=self._make_watch_fn(r"u_init"))
            sess.run(u.initializer)

            u_init_value = self.debug_server.debug_tensor_values[
                "u_init:0:DebugIdentity"][0]
            # BUG FIX: the np.object alias was removed in NumPy 1.24;
            # string tensors come back as arrays of the builtin `object`
            # dtype, which compares equal under old NumPy too.
            self.assertEqual(object, u_init_value.dtype)
            self.assertEqual(0, len(u_init_value))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
chillbear/django-prbac | docs/conf.py | 1 | 7887 | # -*- coding: utf-8 -*-
#
# django-prbac documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 29 17:06:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# One import per line per PEP 8 (was: "import sys, os").
import os
import sys

# Point Django at a minimal settings module so autodoc can import the
# django_prbac package without a fully configured project.
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_prbac.mock_settings'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc pulls API docs from docstrings; coverage reports undocumented
# objects; viewcode links rendered pages to highlighted source.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-prbac'
copyright = u'2013, Dimagi'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# NOTE(review): presumably this should track the package version declared
# in setup.py — keep both in sync when releasing.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# '_build' holds generated output and must never be parsed as a source.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'pyramid' is one of Sphinx's builtin themes, so no extra package is needed.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Files listed here end up in the built site's _static/ directory.
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-prbacdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-prbac.tex', u'django-prbac Documentation',
u'Dimagi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-prbac', u'django-prbac Documentation',
[u'Dimagi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-prbac', u'django-prbac Documentation',
u'Dimagi', 'django-prbac', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
as110/as110.github.io | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/murphy.py | 364 | 2751 | # -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
    """
    Murphy's style from CodeRay.
    """
    # No extra default styling; everything comes from the token map below.
    default_style = ""
    # Token -> Pygments style-string map.  Bare hex values are foreground
    # colours; "bg:" sets (or with no value, clears) the background;
    # "bold"/"italic"/"noitalic" toggle font attributes.
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "#666 italic",
        Comment.Preproc:           "#579 noitalic",
        Comment.Special:           "#c00 bold",
        Keyword:                   "bold #289",
        Keyword.Pseudo:            "#08f",
        Keyword.Type:              "#66f",
        Operator:                  "#333",
        Operator.Word:             "bold #000",
        Name.Builtin:              "#072",
        Name.Function:             "bold #5ed",
        Name.Class:                "bold #e9e",
        Name.Namespace:            "bold #0e84b5",
        Name.Exception:            "bold #F00",
        Name.Variable:             "#036",
        Name.Variable.Instance:    "#aaf",
        Name.Variable.Class:       "#ccf",
        Name.Variable.Global:      "#f84",
        Name.Constant:             "bold #5ed",
        Name.Label:                "bold #970",
        Name.Entity:               "#800",
        Name.Attribute:            "#007",
        Name.Tag:                  "#070",
        Name.Decorator:            "bold #555",
        String:                    "bg:#e0e0ff",
        String.Char:               "#88F bg:",
        String.Doc:                "#D42 bg:",
        String.Interpol:           "bg:#eee",
        String.Escape:             "bold #666",
        String.Regex:              "bg:#e0e0ff #000",
        String.Symbol:             "#fc8 bg:",
        String.Other:              "#f88",
        Number:                    "bold #60E",
        Number.Integer:            "bold #66f",
        Number.Float:              "bold #60E",
        Number.Hex:                "bold #058",
        Number.Oct:                "bold #40E",
        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #c65d09",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",
        Error:                     "#F00 bg:#FAA"
    }
| mit |
jonpry/linux_t100 | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
# isolation_level=None switches the sqlite3 module to autocommit mode:
# every insert is committed immediately, with no implicit transactions.
con.isolation_level = None
def trace_begin():
    """Perf callback: invoked once before any samples are processed.

    Creates the two result tables: gen_events for generic events and
    pebs_ll for PEBS load-latency records.
    """
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """Perf callback: invoked for every sampled event.

    Builds an event object from the sample's metadata and raw buffer, then
    stores it in the matching database table via insert_db().

    param_dict keys used: "attr", "sample", "raw_buf", "comm", "ev_name",
    plus "dso" and "symbol" when perf managed to resolve them.
    """
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]
    # Symbol and dso info are not always resolved: fall back to placeholders
    # so the database columns are never empty.  ("in" replaces the deprecated
    # dict.has_key(), which was removed in python 3; "in" works in both.)
    if "dso" in param_dict:
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"
    if "symbol" in param_dict:
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"
    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Store *event* in the table matching its ev_type.

    Generic events go to gen_events; PEBS load-latency events go to pebs_ll.
    """
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the top bit so the values fit in SQLite's signed 64-bit
        # INTEGER column (addresses can have the high bit set).
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso, event.flags,
                    event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """Perf callback: invoked once after all samples have been processed.

    Prints the summaries for both tables and closes the database.
    """
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Return a histogram bar for *num*: one '#' per power of two.

    The bar length is floor(log2(num)) + 1, which for a positive integer
    is exactly num.bit_length().  bit_length() is exact, whereas the
    previous int(math.log(num, 2) + 1) relied on floating point and could
    be off by one for large counts.

    num must be a positive integer (callers pass COUNT(*) results > 0).
    """
    return '#' * num.bit_length()
def show_general_events():
    """Print summary statistics for the gen_events table.

    Shows the total record count, then per-thread (comm), per-symbol and
    per-dso counts, each with a log2-scaled '#' histogram from num2sym().
    """
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print summary statistics for the pebs_ll table.

    Shows the total record count, then counts grouped by thread, symbol,
    data-source encoding (dse) and latency, each with a '#' histogram.
    """
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency (ascending, not by count)
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    # Fallback perf callback: dump any event that has no dedicated handler
    # as sorted key=value pairs.
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
kain88-de/mdanalysis | testsuite/MDAnalysisTests/core/test_universe.py | 1 | 21671 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
from six.moves import cPickle
import os
try:
from cStringIO import StringIO
except:
from io import StringIO
from MDAnalysisTests.tempdir import TempDir
import numpy as np
from numpy.testing import (
dec,
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
from nose.plugins.attrib import attr
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import (
PSF, DCD,
PSF_BAD,
PDB_small,
PDB_chainidrepeat,
GRO, TRR,
two_water_gro, two_water_gro_nonames,
TRZ, TRZ_psf,
)
from MDAnalysisTests import parser_not_found
import MDAnalysis as mda
import MDAnalysis.coordinates
from MDAnalysis.topology.base import TopologyReaderBase
class IOErrorParser(TopologyReaderBase):
    # Stub topology reader whose parse() always raises; used by the tests
    # below to check that Universe surfaces the parser's IOError (including
    # its message) to the caller.
    def parse(self):
        raise IOError("Useful information")
# This string is not in the `TestUniverseCreation` class or its method because of problems
# with whitespace. Extra indentations make the string unreadable.
CHOL_GRO = """\
Single cholesterol molecule
8
153CHOL ROH 1793 6.558 2.936 4.005 -0.1044 -0.1252 0.0966
153CHOL R1 1794 6.591 2.999 4.279 0.0998 0.1297 0.0158
153CHOL R2 1795 6.657 2.810 4.469 0.0780 0.2817 0.1592
153CHOL R3 1796 6.859 2.983 4.524 0.2233 0.2112 -0.1215
153CHOL R4 1797 6.804 2.849 4.779 0.4156 0.3232 0.0001
153CHOL R5 1798 6.810 3.064 4.744 0.4811 0.3182 -0.0905
153CHOL C1 1799 7.080 3.034 5.012 0.7486 0.2811 -0.3559
153CHOL C2 1800 6.993 3.163 5.284 0.3677 -0.2104 -0.0829
10 10 10
"""
class TestUniverseCreation(object):
    """Tests for Universe construction and the errors it reports."""
    # tests concerning Universe creation and errors encountered
    @staticmethod
    def test_load():
        # Universe(top, trj)
        u = mda.Universe(PSF, PDB_small)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
    @staticmethod
    def test_load_topology_stringio():
        u = mda.Universe(StringIO(CHOL_GRO), format='GRO')
        assert_equal(len(u.atoms), 8, "Loading universe from StringIO failed somehow")
        assert_equal(u.trajectory.ts.positions[0], np.array([65.580002, 29.360001, 40.050003], dtype=np.float32))
    @staticmethod
    def test_load_trajectory_stringio():
        # topology AND coordinates both from in-memory streams
        u = mda.Universe(StringIO(CHOL_GRO), StringIO(CHOL_GRO), format='GRO', topology_format='GRO')
        assert_equal(len(u.atoms), 8, "Loading universe from StringIO failed somehow")
    @staticmethod
    def test_make_universe_no_args():
        # universe creation without args should work
        u = mda.Universe()
        assert_(isinstance(u, mda.Universe))
        assert_(u.atoms == None)
    @staticmethod
    def test_make_universe_stringio_no_format():
        # Loading from StringIO without format arg should raise TypeError
        assert_raises(TypeError, mda.Universe, StringIO(CHOL_GRO))
    @staticmethod
    def test_Universe_no_trajectory_AE():
        # querying trajectory without a trajectory loaded (only topology)
        u = make_Universe()
        assert_raises(AttributeError, getattr, u, 'trajectory')
    @staticmethod
    def test_Universe_topology_unrecognizedformat_VE():
        assert_raises(ValueError, mda.Universe, 'some.weird.not.pdb.but.converted.xtc')
    @staticmethod
    def test_Universe_topology_unrecognizedformat_VE_msg():
        # same as above, but also checks the error message wording
        try:
            mda.Universe('some.weird.not.pdb.but.converted.xtc')
        except ValueError as e:
            assert_('isn\'t a valid topology format' in e.args[0])
        else:
            raise AssertionError
    @staticmethod
    def test_Universe_topology_IE():
        assert_raises(IOError,
                      mda.Universe, 'thisfile', topology_format=IOErrorParser)
    @staticmethod
    def test_Universe_topology_IE_msg():
        # should get the original error, as well as Universe error
        try:
            mda.Universe('thisfile', topology_format=IOErrorParser)
        except IOError as e:
            assert_('Failed to load from the topology file' in e.args[0])
            assert_('Useful information' in e.args[0])
        else:
            raise AssertionError
    @staticmethod
    def test_Universe_filename_IE_msg():
        # check for non existent file
        try:
            mda.Universe('thisfile.xml')
        except IOError as e:
            assert_equal('No such file or directory', e.strerror)
        else:
            raise AssertionError
    @staticmethod
    def test_Universe_invalidfile_IE_msg():
        # check for invalid file (something with the wrong content)
        temp_dir = TempDir()
        with open(os.path.join(temp_dir.name, 'invalid.file.tpr'), 'w') as temp_file:
            temp_file.write('plop')
        try:
            mda.Universe(os.path.join(temp_dir.name, 'invalid.file.tpr'))
        except IOError as e:
            assert_('file or cannot be recognized' in e.args[0])
        else:
            raise AssertionError
        finally:
            temp_dir.dissolve()
    @staticmethod
    def test_Universe_invalidpermissionfile_IE_msg():
        # check for file with invalid permissions (eg. no read access)
        temp_dir = TempDir()
        temp_file = os.path.join(temp_dir.name, 'permission.denied.tpr')
        with open(temp_file, 'w'):
            pass
        # 0o200 = owner write only, no read permission
        os.chmod(temp_file, 0o200)
        try:
            mda.Universe(os.path.join(temp_dir.name, 'permission.denied.tpr'))
        except IOError as e:
            assert_('Permission denied' in e.strerror)
        else:
            raise AssertionError
        finally:
            temp_dir.dissolve()
    @staticmethod
    def test_load_new_VE():
        u = mda.Universe()
        assert_raises(TypeError,
                      u.load_new, 'thisfile', format='soup')
    @staticmethod
    def test_universe_kwargs():
        # unknown kwargs are stored on the Universe and can be replayed
        u = mda.Universe(PSF, PDB_small, fake_kwarg=True)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_(u.kwargs['fake_kwarg'] is True)
        # initialize new universe from pieces of existing one
        u2 = mda.Universe(u.filename, u.trajectory.filename,
                          **u.kwargs)
        assert_(u2.kwargs['fake_kwarg'] is True)
        assert_equal(u.kwargs, u2.kwargs)
    @staticmethod
    def test_universe_topology_class_with_coords():
        # a Topology instance can replace the topology file argument
        u = mda.Universe(PSF, PDB_small)
        u2 = mda.Universe(u._topology, PDB_small)
        assert_(isinstance(u2.trajectory, type(u.trajectory)))
        assert_equal(u.trajectory.n_frames, u2.trajectory.n_frames)
        assert_(u2._topology is u._topology)
class TestUniverse(object):
    """Older Universe loading tests (multiple trajectories, load_new,
    dimensions, pickling)."""
    # older tests, still useful
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_bad_topology(self):
        # tests that Universe builds produce the right error message
        def bad_load():
            return mda.Universe(PSF_BAD, DCD)
        assert_raises(ValueError, bad_load)
    @attr('issue')
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_new(self):
        u = mda.Universe(PSF, DCD)
        u.load_new(PDB_small)
        assert_equal(len(u.trajectory), 1, "Failed to load_new(PDB)")
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_new_TypeError(self):
        u = mda.Universe(PSF, DCD)
        def bad_load(uni):
            return uni.load_new('filename.notarealextension')
        assert_raises(TypeError, bad_load, u)
    def test_load_structure(self):
        # Universe(struct)
        ref = mda.Universe(PSF, PDB_small)
        u = mda.Universe(PDB_small)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_almost_equal(u.atoms.positions, ref.atoms.positions)
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_multiple_list(self):
        # Universe(top, [trj, trj, ...])
        ref = mda.Universe(PSF, DCD)
        u = mda.Universe(PSF, [DCD, DCD])
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_load_multiple_args(self):
        # Universe(top, trj, trj, ...)
        ref = mda.Universe(PSF, DCD)
        u = mda.Universe(PSF, DCD, DCD)
        assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
        assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_pickle_raises_NotImplementedError(self):
        # Universes are (deliberately) not picklable in this version
        u = mda.Universe(PSF, DCD)
        assert_raises(NotImplementedError, cPickle.dumps, u, protocol=cPickle.HIGHEST_PROTOCOL)
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_set_dimensions(self):
        u = mda.Universe(PSF, DCD)
        box = np.array([10, 11, 12, 90, 90, 90])
        u.dimensions = np.array([10, 11, 12, 90, 90, 90])
        assert_allclose(u.dimensions, box)
def test_chainid_quick_select():
    """Check that chainIDs get grouped together when making the quick
    selectors: this pdb file has 2 segments with chainID A.
    """
    u = mda.Universe(PDB_chainidrepeat)
    for sg in (u.A, u.B):
        assert_(isinstance(sg, mda.core.groups.SegmentGroup))
    for seg in (u.C, u.D):
        assert_(isinstance(seg, mda.core.groups.Segment))
    # assert_equal (instead of assert_(... == ...)) reports both values on
    # failure and matches the style used by the rest of this module.
    assert_equal(len(u.A.atoms), 10)
    assert_equal(len(u.B.atoms), 10)
    assert_equal(len(u.C.atoms), 5)
    assert_equal(len(u.D.atoms), 7)
class TestGuessBonds(object):
    """Test the AtomGroup method guess_bonds
    This needs to be done both from Universe creation (via kwarg) and AtomGroup
    It needs to:
    - work if all atoms are in vdwradii table
    - fail properly if not
    - work again if vdwradii are passed.
    """
    def setUp(self):
        # custom vdw radii for the atom types in the "nonames" test file
        self.vdw = {'A':1.05, 'B':0.4}
    def tearDown(self):
        del self.vdw
    def _check_universe(self, u):
        """Verify that the Universe is created correctly"""
        # two waters: 4 O-H bonds, 2 H-O-H angles, no dihedrals
        assert_equal(len(u.bonds), 4)
        assert_equal(len(u.angles), 2)
        assert_equal(len(u.dihedrals), 0)
        assert_equal(len(u.atoms[0].bonds), 2)
        assert_equal(len(u.atoms[1].bonds), 1)
        assert_equal(len(u.atoms[2].bonds), 1)
        assert_equal(len(u.atoms[3].bonds), 2)
        assert_equal(len(u.atoms[4].bonds), 1)
        assert_equal(len(u.atoms[5].bonds), 1)
        assert_('guess_bonds' in u.kwargs)
    def test_universe_guess_bonds(self):
        """Test that making a Universe with guess_bonds works"""
        u = mda.Universe(two_water_gro, guess_bonds=True)
        self._check_universe(u)
        assert_(u.kwargs['guess_bonds'] is True)
    def test_universe_guess_bonds_no_vdwradii(self):
        """Make a Universe that has atoms with unknown vdwradii."""
        assert_raises(ValueError, mda.Universe, two_water_gro_nonames, guess_bonds=True)
    def test_universe_guess_bonds_with_vdwradii(self):
        """Unknown atom types, but with vdw radii here to save the day"""
        u = mda.Universe(two_water_gro_nonames, guess_bonds=True,
                         vdwradii=self.vdw)
        self._check_universe(u)
        assert_(u.kwargs['guess_bonds'] is True)
        assert_equal(self.vdw, u.kwargs['vdwradii'])
    def test_universe_guess_bonds_off(self):
        u = mda.Universe(two_water_gro_nonames, guess_bonds=False)
        # without guessing, no bonded topology attributes should exist
        for attr in ('bonds', 'angles', 'dihedrals'):
            assert_(not hasattr(u, attr))
        assert_(u.kwargs['guess_bonds'] is False)
    def _check_atomgroup(self, ag, u):
        """Verify that the AtomGroup made bonds correctly,
        and that the Universe got all this info
        """
        # only the first water (atoms 0-2) was guessed
        assert_equal(len(ag.bonds), 2)
        assert_equal(len(ag.angles), 1)
        assert_equal(len(ag.dihedrals), 0)
        assert_equal(len(u.bonds), 2)
        assert_equal(len(u.angles), 1)
        assert_equal(len(u.dihedrals), 0)
        assert_equal(len(u.atoms[0].bonds), 2)
        assert_equal(len(u.atoms[1].bonds), 1)
        assert_equal(len(u.atoms[2].bonds), 1)
        assert_equal(len(u.atoms[3].bonds), 0)
        assert_equal(len(u.atoms[4].bonds), 0)
        assert_equal(len(u.atoms[5].bonds), 0)
    def test_atomgroup_guess_bonds(self):
        """Test an atomgroup doing guess bonds"""
        u = mda.Universe(two_water_gro)
        ag = u.atoms[:3]
        ag.guess_bonds()
        self._check_atomgroup(ag, u)
    def test_atomgroup_guess_bonds_no_vdwradii(self):
        u = mda.Universe(two_water_gro_nonames)
        ag = u.atoms[:3]
        assert_raises(ValueError, ag.guess_bonds)
    def test_atomgroup_guess_bonds_with_vdwradii(self):
        u = mda.Universe(two_water_gro_nonames)
        ag = u.atoms[:3]
        ag.guess_bonds(vdwradii=self.vdw)
        self._check_atomgroup(ag, u)
class TestInMemoryUniverse(object):
    """Tests for in-memory trajectories (``in_memory=True`` and
    ``Universe.transfer_to_memory``).

    All tests use the module-level ``mda`` alias consistently; the original
    mixed in bare ``MDAnalysis.Universe``, which only resolved through the
    side effect of ``import MDAnalysis.coordinates`` binding the package
    name.  Stray debug print() calls were removed and the copy-pasted
    "Skip only the last frame" comments corrected to describe each slice.
    """
    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_reader_w_timeseries():
        universe = mda.Universe(PSF, DCD, in_memory=True)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 98, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_reader_wo_timeseries():
        universe = mda.Universe(GRO, TRR, in_memory=True)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (47681, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_reader_w_timeseries_frame_interval():
        # every 10th of 98 frames -> 10 frames kept
        universe = mda.Universe(PSF, DCD, in_memory=True,
                                in_memory_step=10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_reader_wo_timeseries_frame_interval():
        # every 3rd of 10 frames -> 4 frames kept
        universe = mda.Universe(GRO, TRR, in_memory=True,
                                in_memory_step=3)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (47681, 4, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_existing_universe():
        # converting an already-loaded Universe keeps all frames
        universe = mda.Universe(PDB_small, DCD)
        universe.transfer_to_memory()
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 98, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def test_frame_interval_convention():
        # timeseries(skip=10) on a file reader and in_memory_step=10 must
        # select exactly the same frames
        universe1 = mda.Universe(PSF, DCD)
        array1 = universe1.trajectory.timeseries(skip=10)
        universe2 = mda.Universe(PSF, DCD, in_memory=True,
                                 in_memory_step=10)
        array2 = universe2.trajectory.timeseries()
        assert_equal(array1, array2,
                     err_msg="Unexpected differences between arrays.")
    @staticmethod
    def test_slicing_with_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # keep frames [10, 20) -> 10 frames
        universe.transfer_to_memory(start=10, stop=20)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_without_start():
        universe = mda.Universe(PDB_small, DCD)
        # keep frames [0, 10) -> the first 10 frames
        universe.transfer_to_memory(stop=10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_without_stop():
        universe = mda.Universe(PDB_small, DCD)
        # skip the first 10 of 98 frames -> 88 remain
        universe.transfer_to_memory(start=10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 88, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_step_without_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # every 2nd of 98 frames -> 49 remain
        universe.transfer_to_memory(step=2)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 49, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_step_with_start_stop():
        universe = mda.Universe(PDB_small, DCD)
        # every 2nd frame of [10, 30) -> 10 frames
        universe.transfer_to_memory(start=10, stop=30, step=2)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_negative_start():
        universe = mda.Universe(PDB_small, DCD)
        # the last 10 frames
        universe.transfer_to_memory(start=-10)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 10, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
    @staticmethod
    def test_slicing_negative_stop():
        universe = mda.Universe(PDB_small, DCD)
        # drop the last 20 of 98 frames -> 78 remain
        universe.transfer_to_memory(stop=-20)
        assert_equal(universe.trajectory.timeseries(universe.atoms).shape,
                     (3341, 78, 3),
                     err_msg="Unexpected shape of trajectory timeseries")
class TestCustomReaders(object):
    """
    Can pass a reader as kwarg on Universe creation

    Uses the module-level ``mda`` alias throughout (same module object as
    ``MDAnalysis``, which the original referenced directly).
    """
    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_reader(self):
        # check that reader passing works
        u = mda.Universe(TRZ_psf, TRZ, format=mda.coordinates.TRZ.TRZReader)
        assert_equal(len(u.atoms), 8184)
    def test_custom_reader_singleframe(self):
        T = mda.topology.GROParser.GROParser
        R = mda.coordinates.GRO.GROReader
        u = mda.Universe(two_water_gro, two_water_gro,
                         topology_format=T, format=R)
        assert_equal(len(u.atoms), 6)
    def test_custom_reader_singleframe_2(self):
        # Same as before, but only one argument to Universe
        T = mda.topology.GROParser.GROParser
        R = mda.coordinates.GRO.GROReader
        u = mda.Universe(two_water_gro,
                         topology_format=T, format=R)
        assert_equal(len(u.atoms), 6)
    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_parser(self):
        # topology reader passing works
        u = mda.Universe(TRZ_psf, TRZ, topology_format=mda.topology.PSFParser.PSFParser)
        assert_equal(len(u.atoms), 8184)
    @dec.skipif(parser_not_found('TRZ'),
                'TRZ parser not available. Are you using python 3?')
    def test_custom_both(self):
        # use custom for both
        u = mda.Universe(TRZ_psf, TRZ, format=mda.coordinates.TRZ.TRZReader,
                         topology_format=mda.topology.PSFParser.PSFParser)
        assert_equal(len(u.atoms), 8184)
| gpl-2.0 |
S2R2/viper | viper/modules/pymacho/MachOSymtabCommand.py | 6 | 2937 | # encoding: utf-8
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack, pack
from viper.modules.pymacho.MachOLoadCommand import MachOLoadCommand
from viper.modules.pymacho.MachONList import MachONList
from viper.modules.pymacho.Utils import green
class MachOSymtabCommand(MachOLoadCommand):
    """LC_SYMTAB load command: the Mach-O symbol table and string table.

    Header layout (struct symtab_command): symoff/nsyms locate the nlist
    symbol array, stroff/strsize locate the NUL-separated string table.
    """
    # File offset of the nlist symbol array and number of entries.
    symoff = 0
    nsyms = 0
    # File offset and byte size of the string table.
    stroff = 0
    strsize = 0
    # Parsed MachONList objects / string-table entries (set per instance).
    syms = None
    strs = None
    def __init__(self, macho_file=None, cmd=0, is_64=False):
        # cmd is the raw load-command identifier read by the caller.
        self.cmd = cmd
        self.syms = []
        self.strs = []
        self.is_64 = is_64
        if macho_file is not None:
            self.parse(macho_file)
    def parse(self, macho_file):
        """Read the symtab header, then the symbols and strings it points
        to; the file position is restored afterwards."""
        # Header: four little-endian uint32 fields.
        self.symoff, self.nsyms = unpack('<II', macho_file.read(4*2))
        self.stroff, self.strsize = unpack('<II', macho_file.read(4*2))
        before = macho_file.tell()
        # parse symoff
        macho_file.seek(self.symoff)
        for i in range(self.nsyms):
            self.syms.append(MachONList(macho_file, self.is_64))
        # parse strings ('chaines' is French for 'strings': the raw
        # string-table bytes, split on their NUL separators)
        macho_file.seek(self.stroff)
        chaines = unpack('<'+str(self.strsize)+'s', macho_file.read(self.strsize))[0]
        for chaine in chaines.split("\x00"):
            self.strs.append(chaine)
        macho_file.seek(before)
    def write(self, macho_file):
        """Serialize the command; cmdsize is patched in afterwards once the
        header length is known."""
        before = macho_file.tell()
        macho_file.write(pack('<I', self.cmd))
        macho_file.write(pack('<I', 0x0)) # cmdsize placeholder, fixed below
        macho_file.write(pack('<IIII', self.symoff, self.nsyms, self.stroff, self.strsize))
        after = macho_file.tell()
        macho_file.seek(self.symoff)
        for sym in self.syms:
            sym.write(macho_file)
        macho_file.seek(self.stroff)
        macho_file.write("\x00".join(self.strs))
        # Go back and fill in the real cmdsize (bytes written above).
        macho_file.seek(before+4)
        macho_file.write(pack('<I', after-before))
        macho_file.seek(after)
    def display(self, before=''):
        """Pretty-print the command, indented by *before* (python 2)."""
        print before + green("[+]")+" LC_SYMTAB"
        print before + "\t- symoff : 0x%x" % self.symoff
        print before + "\t- nsyms : %d" % self.nsyms
        for sym in self.syms:
            sym.display(before=before+"\t")
        print before + "\t- stroff : 0x%x" % self.stroff
        print before + "\t- strsize : %d (0x%x)" % (self.strsize, self.strsize)
        print before + "\t- strings : "+str(self.strs)
| bsd-3-clause |
hurricup/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/misc.py | 334 | 1438 | """
This module is for the miscellaneous GEOS routines, particularly the
ones that return the area, distance, and length.
"""
from ctypes import c_int, c_double, POINTER
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_dbl, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
__all__ = ['geos_area', 'geos_distance', 'geos_length']
### ctypes generator function ###
def dbl_from_geom(func, num_geom=1):
    """
    Configure and return a GEOS ctypes function whose arguments are
    *num_geom* geometry pointers plus a double that is passed in by
    reference as the last argument.
    """
    # List-repeat instead of a Python-2-only ``xrange`` loop; behaves
    # identically and also runs under Python 3.
    func.argtypes = [GEOM_PTR] * num_geom + [POINTER(c_double)]
    func.restype = c_int  # Status code returned
    func.errcheck = check_dbl
    return func
### ctypes prototypes ###
# Area, distance, and length prototypes.
# Each call wires up argtypes/restype/errcheck on the raw GEOS function.
geos_area = dbl_from_geom(GEOSFunc('GEOSArea'))
geos_distance = dbl_from_geom(GEOSFunc('GEOSDistance'), num_geom=2)
geos_length = dbl_from_geom(GEOSFunc('GEOSLength'))
# Validity reason; only in GEOS 3.1+
if GEOS_PREPARE:
    geos_isvalidreason = GEOSFunc('GEOSisValidReason')
    geos_isvalidreason.argtypes = [GEOM_PTR]
    geos_isvalidreason.restype = geos_char_p
    geos_isvalidreason.errcheck = check_string
    # Advertise the optional symbol only when the library supports it.
    __all__.append('geos_isvalidreason')
| apache-2.0 |
petrutlucian94/nova_dev | doc/ext/nova_todo.py | 68 | 3386 | # -*- coding: utf-8 -*-
# This is a hack of the builtin todo extension, to make the todo_list
# more user friendly.
from sphinx.ext.todo import *
import re
def _(s):
    """Identity stand-in for gettext-style message markup."""
    return s
def process_todo_nodes(app, doctree, fromdocname):
    """doctree-resolved handler: replace todolist nodes with five bullet
    lists, one per priority P1..P5, each item backlinking to its source.

    Priority is parsed from a leading "P<digit>" in the todo text and
    defaults to 5 (lowest).
    """
    if not app.config['todo_include_todos']:
        for node in doctree.traverse(todo_node):
            node.parent.remove(node)

    # Replace all todolist nodes with a list of the collected todos.
    # Augment each todo with a backlink to the original location.
    env = app.builder.env

    if not hasattr(env, 'todo_all_todos'):
        env.todo_all_todos = []

    # remove the item that was added in the constructor, since I'm tired of
    # reading through docutils for the proper way to construct an empty list
    lists = []
    # range() instead of the Python-2-only xrange(); identical behavior.
    for i in range(5):
        lists.append(nodes.bullet_list("", nodes.Text('', '')))
        lists[i].remove(lists[i][0])
        lists[i]['classes'].append('todo_list')

    for node in doctree.traverse(todolist):
        if not app.config['todo_include_todos']:
            node.replace_self([])
            continue

        for todo_info in env.todo_all_todos:
            para = nodes.paragraph()

            # Create a reference back to the file/line the todo came from.
            newnode = nodes.reference('', '')
            filename = env.doc2path(todo_info['docname'], base=None)
            link = (_('%(filename)s, line %(line_info)d') %
                    {'filename': filename, 'line_info': todo_info['lineno']})
            innernode = nodes.emphasis(link, link)
            newnode['refdocname'] = todo_info['docname']

            try:
                newnode['refuri'] = app.builder.get_relative_uri(
                    fromdocname, todo_info['docname'])
                newnode['refuri'] += '#' + todo_info['target']['refid']
            except NoUri:
                # ignore if no URI can be determined, e.g. for LaTeX output
                pass

            newnode.append(innernode)
            para += newnode
            para['classes'].append('todo_link')

            todo_entry = todo_info['todo']

            env.resolve_references(todo_entry, todo_info['docname'],
                                   app.builder)

            item = nodes.list_item('', para)
            todo_entry[1]['classes'].append('details')

            comment = todo_entry[1]

            m = re.match(r"^P(\d)", comment.astext())
            priority = 5
            if m:
                priority = int(m.group(1))
                # clamp out-of-range priorities into 1..5
                if priority < 0:
                    priority = 1
                if priority > 5:
                    priority = 5

            item['classes'].append('todo_p' + str(priority))
            todo_entry['classes'].append('todo_p' + str(priority))

            item.append(comment)
            lists[priority - 1].insert(0, item)

        node.replace_self(lists)
def setup(app):
    """Sphinx extension entry point: register the todo nodes and
    directives, then hook the prioritised todo-list processing."""
    app.add_config_value('todo_include_todos', False, False)
    app.add_node(todolist)
    app.add_node(todo_node,
                 html=(visit_todo_node, depart_todo_node),
                 latex=(visit_todo_node, depart_todo_node),
                 text=(visit_todo_node, depart_todo_node))
    app.add_directive('todo', Todo)
    app.add_directive('todolist', TodoList)
    # collection and purge handlers come from the builtin todo extension;
    # only the resolved-phase handler is overridden locally.
    app.connect('doctree-read', process_todos)
    app.connect('doctree-resolved', process_todo_nodes)
    app.connect('env-purge-doc', purge_todos)
| apache-2.0 |
ivanalejandro0/leap_pycommon | src/leap/common/tests/test_crypto.py | 1 | 2794 | ## -*- coding: utf-8 -*-
# test_crypto.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the crypto submodule.
"""
from leap.common.testing.basetest import BaseLeapTest
from leap.common import crypto
from Crypto import Random
class CryptoTestCase(BaseLeapTest):
    """Round-trip and negative tests for the leap.common.crypto
    symmetric encryption helpers (AES-256-CTR)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @staticmethod
    def _tamper_iv(iv):
        """Return an IV that differs from *iv* but keeps its type/length.

        The original code did ``iv += 1``, which only works when the IV is
        an integer; a byte-string IV raised TypeError, so the wrong-IV path
        was never actually exercised.
        """
        try:
            # integer/counter-style IV
            return iv + 1
        except TypeError:
            # byte-string IV: flip the first byte (mod 256 keeps it a byte)
            return chr((ord(iv[0]) + 1) % 256) + iv[1:]

    def test_encrypt_decrypt_sym(self):
        """Encrypt then decrypt with the same key/IV round-trips."""
        # generate 256-bit key
        key = Random.new().read(32)
        iv, cyphertext = crypto.encrypt_sym(
            'data', key,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertTrue(cyphertext is not None)
        self.assertTrue(cyphertext != '')
        self.assertTrue(cyphertext != 'data')
        plaintext = crypto.decrypt_sym(
            cyphertext, key, iv=iv,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertEqual('data', plaintext)

    def test_decrypt_with_wrong_iv_fails(self):
        """Decrypting with a different IV must not recover the plaintext."""
        key = Random.new().read(32)
        iv, cyphertext = crypto.encrypt_sym(
            'data', key,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertTrue(cyphertext is not None)
        self.assertTrue(cyphertext != '')
        self.assertTrue(cyphertext != 'data')
        iv = self._tamper_iv(iv)
        plaintext = crypto.decrypt_sym(
            cyphertext, key, iv=iv,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertNotEqual('data', plaintext)

    def test_decrypt_with_wrong_key_fails(self):
        """Decrypting with a different key must not recover the plaintext."""
        key = Random.new().read(32)
        iv, cyphertext = crypto.encrypt_sym(
            'data', key,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertTrue(cyphertext is not None)
        self.assertTrue(cyphertext != '')
        self.assertTrue(cyphertext != 'data')
        wrongkey = Random.new().read(32)  # 256-bits key
        # ensure keys are different in case we are extremely lucky
        while wrongkey == key:
            wrongkey = Random.new().read(32)
        plaintext = crypto.decrypt_sym(
            cyphertext, wrongkey, iv=iv,
            method=crypto.EncryptionMethods.AES_256_CTR)
        self.assertNotEqual('data', plaintext)
| gpl-3.0 |
slackhq/python-slackclient | tests/webhook/test_async_webhook.py | 1 | 6601 | import asyncio
import unittest
import aiohttp
from slack.web.classes.attachments import Attachment, AttachmentField
from slack.web.classes.blocks import SectionBlock, ImageBlock
from slack.webhook import AsyncWebhookClient, WebhookResponse
from tests.helpers import async_test
from tests.webhook.mock_web_api_server import (
cleanup_mock_web_api_server,
setup_mock_web_api_server,
)
class TestAsyncWebhook(unittest.TestCase):
    """Tests for AsyncWebhookClient against a local mock webhook server
    (started in setUp on port 8888, torn down in tearDown).

    The payload literals below are sent verbatim to the mock server;
    do not reformat their contents.
    """

    def setUp(self):
        setup_mock_web_api_server(self)

    def tearDown(self):
        cleanup_mock_web_api_server(self)

    @async_test
    async def test_send(self):
        # Plain text payload; mock server answers 200/"ok".
        client = AsyncWebhookClient("http://localhost:8888")
        resp: WebhookResponse = await client.send(text="hello!")
        self.assertEqual(200, resp.status_code)
        self.assertEqual("ok", resp.body)

        resp = await client.send(text="hello!", response_type="in_channel")
        self.assertEqual("ok", resp.body)

    @async_test
    async def test_send_blocks(self):
        """Blocks can be passed as typed objects or raw dicts."""
        client = AsyncWebhookClient("http://localhost:8888")

        # typed block objects
        resp = await client.send(
            text="hello!",
            response_type="ephemeral",
            blocks=[
                SectionBlock(text="Some text"),
                ImageBlock(image_url="image.jpg", alt_text="an image"),
            ],
        )
        self.assertEqual("ok", resp.body)

        # raw dict blocks
        resp = await client.send(
            text="hello!",
            response_type="ephemeral",
            blocks=[
                {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": "This is a mrkdwn section block :ghost: *this is bold*, and ~this is crossed out~, and <https://google.com|this is a link>",
                    },
                },
                {"type": "divider"},
                {
                    "type": "section",
                    "text": {"type": "mrkdwn", "text": "Pick a date for the deadline."},
                    "accessory": {
                        "type": "datepicker",
                        "initial_date": "1990-04-28",
                        "placeholder": {
                            "type": "plain_text",
                            "text": "Select a date",
                        },
                    },
                },
            ],
        )
        self.assertEqual("ok", resp.body)

        # typed block objects again (client state is reusable)
        resp = await client.send(
            text="hello!",
            response_type="ephemeral",
            blocks=[
                SectionBlock(text="Some text"),
                ImageBlock(image_url="image.jpg", alt_text="an image"),
            ],
        )
        self.assertEqual("ok", resp.body)

    @async_test
    async def test_send_attachments(self):
        """Attachments can be passed as raw dicts or typed objects."""
        client = AsyncWebhookClient("http://localhost:8888")

        # raw dict attachment carrying nested blocks
        resp = await client.send(
            text="hello!",
            response_type="ephemeral",
            attachments=[
                {
                    "color": "#f2c744",
                    "blocks": [
                        {
                            "type": "section",
                            "text": {
                                "type": "mrkdwn",
                                "text": "This is a mrkdwn section block :ghost: *this is bold*, and ~this is crossed out~, and <https://google.com|this is a link>",
                            },
                        },
                        {"type": "divider"},
                        {
                            "type": "section",
                            "text": {
                                "type": "mrkdwn",
                                "text": "Pick a date for the deadline.",
                            },
                            "accessory": {
                                "type": "datepicker",
                                "initial_date": "1990-04-28",
                                "placeholder": {
                                    "type": "plain_text",
                                    "text": "Select a date",
                                },
                            },
                        },
                    ],
                }
            ],
        )
        self.assertEqual("ok", resp.body)

        # typed Attachment object exercising every optional field
        resp = await client.send(
            text="hello!",
            response_type="ephemeral",
            attachments=[
                Attachment(
                    text="attachment text",
                    title="Attachment",
                    fallback="fallback_text",
                    pretext="some_pretext",
                    title_link="link in title",
                    fields=[
                        AttachmentField(
                            title=f"field_{i}_title", value=f"field_{i}_value"
                        )
                        for i in range(5)
                    ],
                    color="#FFFF00",
                    author_name="John Doe",
                    author_link="http://johndoeisthebest.com",
                    author_icon="http://johndoeisthebest.com/avatar.jpg",
                    thumb_url="thumbnail URL",
                    footer="and a footer",
                    footer_icon="link to footer icon",
                    ts=123456789,
                    markdown_in=["fields"],
                )
            ],
        )
        self.assertEqual("ok", resp.body)

    @async_test
    async def test_send_dict(self):
        client = AsyncWebhookClient("http://localhost:8888")
        resp: WebhookResponse = await client.send_dict({"text": "hello!"})
        self.assertEqual(200, resp.status_code)
        self.assertEqual("ok", resp.body)

    @async_test
    async def test_timeout_issue_712(self):
        # /timeout endpoint stalls longer than the 1s client timeout.
        client = AsyncWebhookClient(url="http://localhost:8888/timeout", timeout=1)
        with self.assertRaises(Exception):
            await client.send_dict({"text": "hello!"})

    @async_test
    async def test_proxy_issue_714(self):
        # unreachable proxy host must surface as an error
        client = AsyncWebhookClient(
            url="http://localhost:8888", proxy="http://invalid-host:9999"
        )
        with self.assertRaises(Exception):
            await client.send_dict({"text": "hello!"})

    @async_test
    async def test_user_agent_customization_issue_769(self):
        # the mock endpoint validates the User-Agent encoded in its path
        client = AsyncWebhookClient(
            url="http://localhost:8888/user-agent-this_is-test",
            user_agent_prefix="this_is",
            user_agent_suffix="test",
        )
        resp = await client.send_dict({"text": "hi!"})
        self.assertEqual(resp.body, "ok")
| mit |
cihatix/peach | Peach/Publishers/smtp.py | 3 | 4329 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import time
import sys
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import Encoders
from email.Utils import COMMASPACE, formatdate
from Peach.publisher import Publisher
from Peach.Engine.common import PeachException
class SMTPPublisher(Publisher):
    """Peach publisher that delivers each fuzzed test case as a raw
    message over a persistent SMTP connection.

    The connection is opened once in start() and reused; send() retries
    up to three times and pauses every 500 messages to throttle load.
    """

    def __init__(self, host="localhost",
                 port=smtplib.SMTP_PORT,
                 debugLevel=0,
                 mailFrom="localhost@localhost",
                 mailTo="localhost@localhost",
                 username="",
                 password=""):
        Publisher.__init__(self)
        self.host = host
        try:
            self.port = int(port)
        except (TypeError, ValueError):
            # narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are not misreported as a bad port value
            raise PeachException("The SMTP publisher parameter for port is not a valid number.")
        self.debugLevel = int(debugLevel)
        self.mailFrom = mailFrom
        self.mailTo = mailTo
        # NOTE(review): username/password are stored but never used to
        # authenticate (no smtp.login call) - confirm whether auth is needed.
        self.username = username
        self.password = password
        # messages sent so far; drives the periodic throttling pause
        self.loadBalance = 0
        self._connected = None

    def start(self):
        """Open the SMTP connection; no-op if already connected."""
        if self._connected:
            return
        print("[*] Connecting to %s:%d ..." % (self.host, self.port))
        try:
            self.smtp = smtplib.SMTP(self.host, self.port)
        except Exception:
            # was a bare except; keep the friendly diagnostic but let
            # process-control exceptions propagate
            raise PeachException("Peer %s:%d is down or connection settings are wrong." % (self.host, self.port))
        self._connected = True
        self.smtp.set_debuglevel(self.debugLevel)

    def connect(self):
        pass

    def send(self, data):
        """Send *data* as a raw message, retrying up to three times with a
        5 second backoff; raises PeachException on persistent failure."""
        # Pause every 500 messages so we do not overwhelm the server.
        if not self.loadBalance % 500 and self.loadBalance != 0:
            print("[*] Pause ...")
            time.sleep(10)
        for i in range(3):
            try:
                self.smtp.sendmail(self.mailFrom, self.mailTo, data)
                exception = None
                break
            except Exception:
                exception = sys.exc_info()
                time.sleep(5)
        if exception:
            try:
                reason = str(exception[1])
            except Exception:
                reason = "unknown reason."
            message = "SMTP send mail to %s:%d failed: %s" % (self.host, self.port, reason)
            # 5.4.0 (routing) failures are treated as transient: log and
            # continue; anything else tears down the connection and aborts.
            if message.find("5.4.0") > -1:
                print(message)
            else:
                self.smtp.close()
                raise PeachException(message)
        self.loadBalance += 1

    def close(self):
        pass

    def stop(self):
        pass
class EmailAttachment(Publisher):
    """
    Send fuzzed data as email attachment.
    """

    def __init__(self, server, fileName, msgTo, msgFrom="peach@peach.org",
                 msgSubject="Fuzzing Test",
                 msgText="Message generated by Peach Fuzzing Platform.\n\nhttp://peachfuzzer.com\n\n - Peach\n"):
        Publisher.__init__(self)
        # SMTP relay host and the attachment file name shown to the receiver
        self.server = server
        self.fileName = fileName
        self.msgFrom = msgFrom
        self.msgTo = msgTo
        self.msgSubject = msgSubject
        self.msgText = msgText

    def send(self, data):
        """
        Publish some data

        @type data: string
        @param data: Data to publish
        """
        # Build Message Body
        msg = MIMEMultipart()
        msg['From'] = self.msgFrom
        msg['To'] = self.msgTo
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = self.msgSubject
        msg.attach(MIMEText(self.msgText))
        # Attach file
        # NOTE(review): content type is hard-coded to application/pdf
        # regardless of fileName's extension - confirm this is intended.
        part = MIMEBase('application', 'pdf')
        part.set_payload(data)
        Encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % self.fileName)
        msg.attach(part)
        # Send email (a fresh connection per message, unlike SMTPPublisher)
        smtp = smtplib.SMTP(self.server)
        smtp.sendmail(self.msgFrom, self.msgTo, msg.as_string())
        smtp.close()

    def connect(self):
        """
        Called to connect or open a connection/file.
        """
        pass

    def close(self):
        """
        Close current stream/connection.
        """
        pass
class _OleStorage(object):
    """
    This class wraps OLE Storage APIs
    """
    # Placeholder: no OLE functionality is implemented here yet.
    pass
| mpl-2.0 |
julian-seward1/servo | tests/wpt/css-tests/tools/pytest/_pytest/mark.py | 168 | 11093 | """ generic mechanism for marking and selecting python functions. """
import inspect
class MarkerError(Exception):
    """Error in use of a pytest marker/attribute."""
    # Carries no extra state; the message alone describes the misuse.
def pytest_namespace():
    """Expose the ``pytest.mark`` singleton in the pytest namespace."""
    mark_factory = MarkGenerator()
    return {'mark': mark_factory}
def pytest_addoption(parser):
    """Register the -k / -m selection options, --markers, and the
    ``markers`` ini value."""
    group = parser.getgroup("general")
    group._addoption(
        '-k',
        action="store", dest="keyword", default='', metavar="EXPRESSION",
        help="only run tests which match the given substring expression. "
             "An expression is a python evaluatable expression "
             "where all names are substring-matched against test names "
             "and their parent classes. Example: -k 'test_method or test "
             "other' matches all test functions and classes whose name "
             "contains 'test_method' or 'test_other'. "
             "Additionally keywords are matched to classes and functions "
             "containing extra names in their 'extra_keyword_matches' set, "
             "as well as functions which have names assigned directly to them."
    )

    group._addoption(
        "-m",
        action="store", dest="markexpr", default="", metavar="MARKEXPR",
        help="only run tests matching given mark expression. "
             "example: -m 'mark1 and not mark2'."
    )

    group.addoption(
        "--markers", action="store_true",
        help="show markers (builtin, plugin and per-project ones)."
    )

    # per-project marker registrations; consulted by MarkGenerator._check
    parser.addini("markers", "markers for test functions", 'linelist')
def pytest_cmdline_main(config):
    """Handle ``--markers``: print each registered marker line and exit
    with status 0 (returning None otherwise lets pytest continue)."""
    import _pytest.config
    if config.option.markers:
        config._do_configure()
        tw = _pytest.config.create_terminal_writer(config)
        for line in config.getini("markers"):
            # ini lines look like "name(args): description"
            name, rest = line.split(":", 1)
            tw.write("@pytest.mark.%s:" % name, bold=True)
            tw.line(rest)
            tw.line()
        config._ensure_unconfigure()
        return 0


# run before other cmdline_main implementations so --markers wins
pytest_cmdline_main.tryfirst = True
def pytest_collection_modifyitems(items, config):
    """Filter collected *items* in place using -k (keyword) and -m (mark)
    expressions, reporting dropped items via pytest_deselected."""
    keywordexpr = config.option.keyword
    matchexpr = config.option.markexpr
    if not keywordexpr and not matchexpr:
        return
    # pytest used to allow "-" for negating
    # but today we just allow "-" at the beginning, use "not" instead
    # we probably remove "-" alltogether soon
    if keywordexpr.startswith("-"):
        keywordexpr = "not " + keywordexpr[1:]
    selectuntil = False
    # trailing ":" means "select everything from the first match onwards"
    if keywordexpr[-1:] == ":":
        selectuntil = True
        keywordexpr = keywordexpr[:-1]

    remaining = []
    deselected = []
    for colitem in items:
        if keywordexpr and not matchkeyword(colitem, keywordexpr):
            deselected.append(colitem)
        else:
            if selectuntil:
                # first match found: disable keyword filtering from here on
                keywordexpr = None
            if matchexpr:
                if not matchmark(colitem, matchexpr):
                    deselected.append(colitem)
                    continue
            remaining.append(colitem)

    if deselected:
        config.hook.pytest_deselected(items=deselected)
        items[:] = remaining
class MarkMapping:
    """Local name-space for evaluating -m expressions: indexing with a
    marker name yields True exactly when that marker is present in the
    supplied keywords."""

    def __init__(self, keywords):
        self._mymarks = {
            key
            for key, value in keywords.items()
            if isinstance(value, (MarkInfo, MarkDecorator))
        }

    def __getitem__(self, name):
        return name in self._mymarks
class KeywordMapping:
    """Local name-space for evaluating -k expressions.

    Built from a list of names; indexing with a string yields True when
    that string is a substring of any of the names.
    """

    def __init__(self, names):
        self._names = names

    def __getitem__(self, subname):
        return any(subname in name for name in self._names)
def matchmark(colitem, markexpr):
    """Tries to match on any marker names, attached to the given colitem."""
    # markexpr comes from the user's own -m option; evaluating it with
    # only marker-name lookups in scope keeps the expression constrained.
    return eval(markexpr, {}, MarkMapping(colitem.keywords))
def matchkeyword(colitem, keywordexpr):
    """Tries to match given keyword expression to given collector item.

    Will match on the name of colitem, including the names of its parents.
    Only matches names of items which are either a :class:`Class` or a
    :class:`Function`.
    Additionally, matches on names in the 'extra_keyword_matches' set of
    any item, as well as names directly assigned to test functions.
    """
    mapped_names = set()

    # Add the names of the current item and any parent items
    import pytest
    for item in colitem.listchain():
        if not isinstance(item, pytest.Instance):
            mapped_names.add(item.name)

    # Add the names added as extra keywords to current or parent items
    for name in colitem.listextrakeywords():
        mapped_names.add(name)

    # Add the names attached to the current function through direct assignment
    if hasattr(colitem, 'function'):
        for name in colitem.function.__dict__:
            mapped_names.add(name)

    mapping = KeywordMapping(mapped_names)
    if " " not in keywordexpr:
        # special case to allow for simple "-k pass" and "-k 1.3"
        return mapping[keywordexpr]
    elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
        return not mapping[keywordexpr[4:]]
    # general case: evaluate the boolean expression with substring lookups
    return eval(keywordexpr, {}, mapping)
def pytest_configure(config):
    """With --strict, arm the pytest.mark singleton so that only markers
    registered in the ini file may be used (see MarkGenerator._check)."""
    import pytest
    if config.option.strict:
        pytest.mark._config = config
class MarkGenerator:
    """ Factory for :class:`MarkDecorator` objects - exposed as
    a ``pytest.mark`` singleton instance.  Example::

         import pytest
         @pytest.mark.slowtest
         def test_function():
            pass

    will set a 'slowtest' :class:`MarkInfo` object
    on the ``test_function`` object. """

    def __getattr__(self, name):
        if name[0] == "_":
            raise AttributeError("Marker name must NOT start with underscore")
        if hasattr(self, '_config'):
            # strict mode (set by pytest_configure): validate the name
            self._check(name)
        return MarkDecorator(name)

    def _check(self, name):
        # fast path: cache already built and the name is registered
        try:
            if name in self._markers:
                return
        except AttributeError:
            pass
        # (re)build the registered-marker cache from the ini configuration;
        # also re-runs when a name was not found, picking up new ini lines
        self._markers = l = set()
        for line in self._config.getini("markers"):
            # an ini line looks like "name(args): description"
            beginning = line.split(":", 1)
            x = beginning[0].split("(", 1)[0]
            l.add(x)
        if name not in self._markers:
            raise AttributeError("%r not a registered marker" % (name,))
def istestfunc(func):
    """Return True for callables that carry a real name (i.e. not a
    lambda and not a non-callable object)."""
    if not hasattr(func, "__call__"):
        return False
    return getattr(func, "__name__", "<lambda>") != "<lambda>"
class MarkDecorator:
    """ A decorator for test functions and test classes.  When applied
    it will create :class:`MarkInfo` objects which may be
    :ref:`retrieved by hooks as item keywords <excontrolskip>`.
    MarkDecorator instances are often created like this::

        mark1 = pytest.mark.NAME              # simple MarkDecorator
        mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator

    and can then be applied as decorators to test functions::

        @mark2
        def test_function():
            pass

    When a MarkDecorator instance is called it does the following:
      1. If called with a single class as its only positional argument and no
         additional keyword arguments, it attaches itself to the class so it
         gets applied automatically to all test cases found in that class.
      2. If called with a single function as its only positional argument and
         no additional keyword arguments, it attaches a MarkInfo object to the
         function, containing all the arguments already stored internally in
         the MarkDecorator.
      3. When called in any other case, it performs a 'fake construction' call,
         i.e. it returns a new MarkDecorator instance with the original
         MarkDecorator's content updated with the arguments passed to this
         call.

    Note: The rules above prevent MarkDecorator objects from storing only a
    single function or class reference as their positional argument with no
    additional keyword or positional arguments.
    """

    def __init__(self, name, args=None, kwargs=None):
        self.name = name
        self.args = args or ()
        self.kwargs = kwargs or {}

    @property
    def markname(self):
        return self.name  # for backward-compat (2.4.1 had this attr)

    def __repr__(self):
        d = self.__dict__.copy()
        name = d.pop('name')
        return "<MarkDecorator %r %r>" % (name, d)

    def __call__(self, *args, **kwargs):
        """ if passed a single callable argument: decorate it with mark info.
            otherwise add *args/**kwargs in-place to mark information. """
        if args and not kwargs:
            func = args[0]
            is_class = inspect.isclass(func)
            if len(args) == 1 and (istestfunc(func) or is_class):
                if is_class:
                    # case 1: decorate a class by appending to its pytestmark
                    if hasattr(func, 'pytestmark'):
                        mark_list = func.pytestmark
                        if not isinstance(mark_list, list):
                            mark_list = [mark_list]
                        # always work on a copy to avoid updating pytestmark
                        # from a superclass by accident
                        mark_list = mark_list + [self]
                        func.pytestmark = mark_list
                    else:
                        func.pytestmark = [self]
                else:
                    # case 2: decorate a function by attaching/extending a
                    # MarkInfo attribute named after the marker
                    holder = getattr(func, self.name, None)
                    if holder is None:
                        holder = MarkInfo(
                            self.name, self.args, self.kwargs
                        )
                        setattr(func, self.name, holder)
                    else:
                        holder.add(self.args, self.kwargs)
                return func
        # case 3: 'fake construction' - accumulate args into a new decorator
        kw = self.kwargs.copy()
        kw.update(kwargs)
        args = self.args + args
        return self.__class__(self.name, args=args, kwargs=kw)
class MarkInfo:
    """ Marking object created by :class:`MarkDecorator` instances. """

    def __init__(self, name, args, kwargs):
        #: name of attribute
        self.name = name
        #: positional argument list, empty if none specified
        self.args = args
        #: keyword argument dictionary, empty if nothing specified
        self.kwargs = kwargs.copy()
        # one (args, kwargs) entry per marking call, in application order
        self._arglist = [(args, kwargs.copy())]

    def __repr__(self):
        return "<MarkInfo %r args=%r kwargs=%r>" % (self.name, self.args, self.kwargs)

    def add(self, args, kwargs):
        """ add a MarkInfo with the given args and kwargs. """
        self._arglist.append((args, kwargs))
        self.args += args
        self.kwargs.update(kwargs)

    def __iter__(self):
        """ yield MarkInfo objects each relating to a marking-call. """
        return (MarkInfo(self.name, a, kw) for a, kw in self._arglist)
| mpl-2.0 |
h4ck3rm1k3/pip | pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
    """Coerce *s* to text: pass through None and text unchanged, decode
    byte strings as strict UTF-8."""
    if s is None:
        return None
    if isinstance(s, text_type):
        return s
    return s.decode("utf-8", "strict")
class Root(object):
    """Wraps an lxml ElementTree so the internal DTD (if any) and all
    top-level siblings (comments/PIs around the root element) appear as
    children of a single document node."""

    def __init__(self, et):
        self.elementtree = et
        self.children = []
        if et.docinfo.internalDTD:
            self.children.append(Doctype(self,
                                         ensure_str(et.docinfo.root_name),
                                         ensure_str(et.docinfo.public_id),
                                         ensure_str(et.docinfo.system_url)))
        root = et.getroot()
        node = root

        # rewind to the first top-level node preceding the root element
        while node.getprevious() is not None:
            node = node.getprevious()
        # then collect every top-level node in document order
        while node is not None:
            self.children.append(node)
            node = node.getnext()

        self.text = None
        self.tail = None

    def __getitem__(self, key):
        return self.children[key]

    def getnext(self):
        # the document node has no siblings
        return None

    def __len__(self):
        return 1
class Doctype(object):
    """Synthetic node representing a DOCTYPE declaration attached to a
    Root wrapper; exposes the text/tail/getnext walker interface."""

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node, self.name = root_node, name
        self.public_id, self.system_id = public_id, system_id
        # a doctype carries no character data of its own
        self.text = None
        self.tail = None

    def getnext(self):
        # the doctype is always the root wrapper's first child, so its
        # next sibling is the second child
        return self.root_node.children[1]
class FragmentRoot(Root):
    """Root substitute for walking a list of fragments instead of a full
    document; deliberately does NOT call Root.__init__."""

    def __init__(self, children):
        # wrap each fragment so it gets the walker's node interface
        self.children = [FragmentWrapper(self, child) for child in children]
        self.text = self.tail = None

    def getnext(self):
        return None
class FragmentWrapper(object):
    """Wraps a single fragment (element, string, ...) so it exposes the
    text/tail/sibling interface the tree walker expects; everything else
    is delegated to the wrapped object."""

    def __init__(self, fragment_root, obj):
        self.root_node = fragment_root
        self.obj = obj
        # snapshot text/tail as text, or None when the fragment has none
        if hasattr(self.obj, 'text'):
            self.text = ensure_str(self.obj.text)
        else:
            self.text = None
        if hasattr(self.obj, 'tail'):
            self.tail = ensure_str(self.obj.tail)
        else:
            self.tail = None

    def __getattr__(self, name):
        # delegate unknown attribute access to the wrapped object
        return getattr(self.obj, name)

    def getnext(self):
        # next wrapper among the fragment root's children, if any
        siblings = self.root_node.children
        idx = siblings.index(self)
        if idx < len(siblings) - 1:
            return siblings[idx + 1]
        else:
            return None

    def __getitem__(self, key):
        return self.obj[key]

    def __bool__(self):
        return bool(self.obj)

    def getparent(self):
        # fragments are treated as top-level nodes
        return None

    def __str__(self):
        return str(self.obj)

    def __unicode__(self):
        return str(self.obj)

    def __len__(self):
        return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
    """html5lib tree walker over lxml trees.

    Text is not a node type in lxml, so text positions are represented as
    ``(element, "text")`` / ``(element, "tail")`` tuples throughout.
    """

    def __init__(self, tree):
        # accept a full ElementTree or a list of fragments
        if hasattr(tree, "getroot"):
            tree = Root(tree)
        elif isinstance(tree, list):
            tree = FragmentRoot(tree)
        _base.NonRecursiveTreeWalker.__init__(self, tree)
        self.filter = ihatexml.InfosetFilter()

    def getNodeDetails(self, node):
        """Classify *node* and return the token tuple html5lib expects."""
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            return _base.TEXT, ensure_str(getattr(node, key))

        elif isinstance(node, Root):
            return (_base.DOCUMENT,)

        elif isinstance(node, Doctype):
            return _base.DOCTYPE, node.name, node.public_id, node.system_id

        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
            # a bare string fragment
            return _base.TEXT, node.obj

        elif node.tag == etree.Comment:
            return _base.COMMENT, ensure_str(node.text)

        elif node.tag == etree.Entity:
            return _base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;

        else:
            # This is assumed to be an ordinary element
            match = tag_regexp.match(ensure_str(node.tag))
            if match:
                namespace, tag = match.groups()
            else:
                namespace = None
                tag = ensure_str(node.tag)
            attrs = {}
            for name, value in list(node.attrib.items()):
                name = ensure_str(name)
                value = ensure_str(value)
                # attribute names may also carry a {namespace} prefix
                match = tag_regexp.match(name)
                if match:
                    attrs[(match.group(1), match.group(2))] = value
                else:
                    attrs[(None, name)] = value
            return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
                    attrs, len(node) > 0 or node.text)

    def getFirstChild(self, node):
        assert not isinstance(node, tuple), "Text nodes have no children"
        assert len(node) or node.text, "Node has no children"
        # leading text comes before the first child element
        if node.text:
            return (node, "text")
        else:
            return node[0]

    def getNextSibling(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
                # because node[0] might evaluate to False if it has no child element
                if len(node):
                    return node[0]
                else:
                    return None
            else:  # tail
                return node.getnext()

        return (node, "tail") if node.tail else node.getnext()

    def getParentNode(self, node):
        if isinstance(node, tuple):  # Text node
            node, key = node
            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
            if key == "text":
                # the element itself is the parent of its leading text
                return node
            # else: fallback to "normal" processing
        return node.getparent()
| mit |
jaruba/chromium.src | build/android/pylib/remote/device/appurify_sanitized.py | 51 | 1146 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import logging
import os
import sys
from pylib import constants
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'requests', 'src'))
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'appurify-python', 'src'))
# Importing the appurify modules installs extra handlers on the root
# logger as a side effect; snapshot the handler list before and after so
# the newly-added ones can be identified and removed.
handlers_before = list(logging.getLogger().handlers)

import appurify.api
import appurify.utils

handlers_after = list(logging.getLogger().handlers)
new_handler = list(set(handlers_after) - set(handlers_before))
while new_handler:
    logging.info("Removing logging handler.")
    logging.getLogger().removeHandler(new_handler.pop())

# re-export under the short names used by the rest of pylib
api = appurify.api
utils = appurify.utils
# This is not thread safe. If multiple threads are ever supported with appurify
# this may cause logging messages to go missing.
@contextlib.contextmanager
def SanitizeLogging(verbose_count, level):
    """Context manager that mutes log records at or below *level* unless
    verbose output was requested (verbose_count >= 2).

    Yields True when logging was disabled, False otherwise.
    """
    if verbose_count < 2:
        logging.disable(level)
        try:
            yield True
        finally:
            # Restore logging even if the managed block raises; the
            # original code skipped this on exceptions, leaving logging
            # globally muted for the rest of the process.
            logging.disable(logging.NOTSET)
    else:
        yield False
| bsd-3-clause |
noahc3/PokemonGoTeamManager | libs/py/pogoapi/POGOProtos/Inventory/Item/ItemData_pb2.py | 15 | 3163 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Inventory/Item/ItemData.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory.Item import ItemId_pb2 as POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Inventory/Item/ItemData.proto',
package='POGOProtos.Inventory.Item',
syntax='proto3',
serialized_pb=_b('\n(POGOProtos/Inventory/Item/ItemData.proto\x12\x19POGOProtos.Inventory.Item\x1a&POGOProtos/Inventory/Item/ItemId.proto\"]\n\x08ItemData\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.POGOProtos.Inventory.Item.ItemId\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\x12\x0e\n\x06unseen\x18\x03 \x01(\x08\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ITEMDATA = _descriptor.Descriptor(
name='ItemData',
full_name='POGOProtos.Inventory.Item.ItemData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='POGOProtos.Inventory.Item.ItemData.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='count', full_name='POGOProtos.Inventory.Item.ItemData.count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unseen', full_name='POGOProtos.Inventory.Item.ItemData.unseen', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=111,
serialized_end=204,
)
_ITEMDATA.fields_by_name['item_id'].enum_type = POGOProtos_dot_Inventory_dot_Item_dot_ItemId__pb2._ITEMID
DESCRIPTOR.message_types_by_name['ItemData'] = _ITEMDATA
ItemData = _reflection.GeneratedProtocolMessageType('ItemData', (_message.Message,), dict(
DESCRIPTOR = _ITEMDATA,
__module__ = 'POGOProtos.Inventory.Item.ItemData_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Inventory.Item.ItemData)
))
_sym_db.RegisterMessage(ItemData)
# @@protoc_insertion_point(module_scope)
| mit |
USGSDenverPychron/pychron | pychron/lasers/laser_managers/pulse.py | 1 | 6771 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Any, Instance, Float, Event, \
Property, Bool, on_trait_change
from traitsui.api import View, Item, Handler, HGroup, ButtonEditor, spring, VGroup, Spring, UItem, RangeEditor
# import apptools.sweet_pickle as pickle
# ============= standard library imports ========================
# import os
from threading import Thread
# ============= local library imports ==========================
# from pychron.paths import paths
# from pychron.pyscripts.wait_dialog import WaitDialog
# import time
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.wait.wait_control import WaitControl
class PulseHandler(Handler):
    """Traits UI handler that persists the Pulse settings when its window closes."""
    def close(self, info, ok):
        # Save the edited Pulse object before the dialog is torn down, then
        # let the close proceed unchanged.
        info.object.dump_pulse()
        return ok
class Pulse(HasTraits):
    """Fire the laser at a fixed power for a fixed duration.

    Traits UI model: pressing ``pulse_button`` toggles a background thread
    that sets the laser power, runs the WaitControl countdown, then restores
    power to 0 (or disables the laser entirely when ``disable_at_end``).

    NOTE(review): behavior depends on Traits naming conventions
    (``_<name>_changed``, ``_<name>_default``, ``_get_<name>``), so the
    method names below must not be renamed.
    """
    # seconds the laser stays on for one pulse
    duration = Float(1)
    # countdown helper; transient -> excluded from pickling
    wait_control = Instance(WaitControl, transient=True)
    # owning laser manager; expected to provide set_laser_power()/disable_laser()
    manager = Any(transient=True)
    # output power; enter_set so typing a value only applies on Enter
    power = Float(1.1, auto_set=False, enter_set=True)
    units = Property
    pulse_button = Event
    pulse_label = Property(depends_on='pulsing')
    pulsing = Bool(False)
    enabled = Bool(False)
    # if True, disable the laser after the pulse instead of setting power to 0
    disable_at_end = Bool(False)
    # def dump(self):
    # p = os.path.join(paths.hidden_dir, 'pulse')
    # with open(p, 'wb') as f:
    # pickle.dump(self, f)
    @on_trait_change('manager:enabled')
    def upad(self, obj, name, old, new):
        # mirror the manager's enabled state onto this model so the UI
        # enables/disables the Fire button accordingly
        self.enabled = new
    def _power_changed(self):
        # live-update the output power while a pulse is in progress
        if self.pulsing and self.manager:
            self.manager.set_laser_power(self.power)
    def _duration_changed(self):
        # keep the countdown widget in sync with the requested duration
        self.wait_control.duration = self.duration
        self.wait_control.reset()
    def _wait_control_default(self):
        # NOTE(review): low_name is passed 0 here although the other
        # arguments suggest it expects a trait *name*; confirm against
        # WaitControl's interface.
        return WaitControl(low_name=0,
                           auto_start=False,
                           duration=self.duration,
                           title='',
                           dispose_at_end=False)
    def start(self):
        """Run one pulse (executed on a worker thread by _pulse_button_fired)."""
        self._duration_changed()
        # evt = TEvent()
        man = self.manager
        if man is not None:
            # man.enable_laser()
            resp = man.set_laser_power(self.power)
            if resp is False:
                # power could not be set: abort without firing the countdown
                self.pulsing = False
                self.trait_set(duration=0, trait_change_notify=False)
                return
        # blocks until the countdown completes or is cancelled
        self.wait_control.start()
        self.pulsing = False
        if man is not None:
            if self.disable_at_end:
                man.disable_laser()
            else:
                man.set_laser_power(0)
    def _get_pulse_label(self):
        # button caption toggles with the pulsing state
        return 'Fire' if not self.pulsing else 'Stop'
    def _get_units(self):
        return self.manager.units
    def _pulse_button_fired(self):
        if self.pulsing:
            # second press cancels: forcing current_time negative stops the
            # WaitControl countdown
            self.pulsing = False
            self.wait_control.current_time = -1
        else:
            # first press fires: run start() off the UI thread
            self.pulsing = True
            t = Thread(target=self.start)
            t.start()
    def traits_view(self):
        v = View(
            VGroup(
                VGroup(
                    HGroup(Item('power', tooltip='Hit Enter for change to take effect'),
                           Item('units', style='readonly', show_label=False),
                           spring,
                           Item('pulse_button',
                                editor=ButtonEditor(label_value='pulse_label'),
                                show_label=False,
                                enabled_when='object.enabled')),
                    Item('duration', label='Duration (s)', tooltip='Set the laser pulse duration in seconds')),
                VGroup(
                    CustomLabel('object.wait_control.message',
                                size=14,
                                weight='bold',
                                color_name='object.wait_control.message_color'),
                    HGroup(Spring(width=-5, springy=False),
                           Item('object.wait_control.high', label='Set Max. Seconds'),
                           spring, UItem('object.wait_control.continue_button')),
                    HGroup(Spring(width=-5, springy=False),
                           Item('object.wait_control.current_time', show_label=False,
                                editor=RangeEditor(mode='slider',
                                                   low=1,
                                                   # low_name='low_name',
                                                   high_name='object.wait_control.duration')),
                           CustomLabel('object.wait_control.current_time',
                                       size=14,
                                       weight='bold')))),
            # Item('wait_control', show_label=False, style='custom'),
            id='pulse',
            handler=PulseHandler())
        return v
# class LaserPulseManager(Manager):
# # pulse_button = Event
# # pulse_label = Property
# # pulsing = Bool(False)
# pulse = Instance(Pulse)
#
# def dump_pulse(self):
# p = os.path.join(hidden_dir, 'pulse')
# with open(p, 'w') as f:
# pickle.dump(self.pulse, f)
#
# def _pulse_default(self):
# p = os.path.join(hidden_dir, 'pulse')
# if os.path.isfile(p):
# with open(p, 'r') as f:
# try:
# pul = pickle.load(f)
# pul.manager = self.parent
# except pickle.PickleError:
# pul = Pulse(manager=self.parent)
# else:
# pul = Pulse(manager=self.parent)
# # pul = Pulse(manager=self.parent)
#
# return pul
#
# def standalone_view(self):
# v = View(#self._button_factory('pulse_button', 'pulse_label', align='right'),
# Item('pulse', show_label=False, style='custom'),
# title='Pulse',
# resizable=True,
# handler=PulseHandler
# )
# return v
#
#
# if __name__ == '__main__':
# lp = LaserPulseManager()
# lp.configure_traits()
# ============= EOF ====================================
| apache-2.0 |
iamlalit/olympia-volunteer | app/lib/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
# Marker file left behind by download() when the cached tarball could not be
# fetched; its presence tells a later upload() run that the cache must be
# (re)built and pushed.
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
# Fail fast if the target bucket is not configured.
try:
    BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
    raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    """Return the size of *directory*'s cache tarball as e.g. ``"3 MiB"``."""
    # (the original local was misnamed `kib` although the division yields MiB)
    size_mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(size_mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
    """Pack *directory* into its cache tarball (see _tarball_filename_for)."""
    print("Creating tarball of {}...".format(directory))
    parent, name = dirname(directory), basename(directory)
    # -C parent: archive the directory by basename so extraction is relocatable
    run(['tar', '-czf', _tarball_filename_for(directory), '-C', parent, name])
def _extract_tarball(directory):
    """Unpack *directory*'s cache tarball back into its parent directory."""
    print("Extracting tarball of {}...".format(directory))
    tarball = _tarball_filename_for(directory)
    run(['tar', '-xzf', tarball, '-C', dirname(directory)])
def download(directory):
    """Fetch the cached tarball for *directory* from S3 and unpack it.

    On any S3 error the NEED_TO_UPLOAD_MARKER is left behind so a later
    ``upload`` run knows the cache must be rebuilt, and the build aborts.
    Relies on module globals ``key`` and ``friendly_name`` set in __main__.
    """
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
    tarball = _tarball_filename_for(directory)
    try:
        print("Downloading {} tarball from S3...".format(friendly_name))
        key.get_contents_to_filename(tarball)
    except S3ResponseError as err:
        # flag the cache as stale, then abort the build with the S3 error
        open(NEED_TO_UPLOAD_MARKER, 'a').close()
        print(err)
        raise SystemExit("Cached {} download failed!".format(friendly_name))
    print("Downloaded {}.".format(_tarball_size(directory)))
    _extract_tarball(directory)
    print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
    """Tar up *directory*, push it to S3, then clear the need-to-upload marker.

    Relies on module globals ``key`` and ``friendly_name`` set in __main__.
    """
    _create_tarball(directory)
    print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
    tarball = _tarball_filename_for(directory)
    key.set_contents_from_filename(tarball)
    print("{} cache successfully updated.".format(friendly_name))
    _delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
    # Uses environment variables:
    # AWS_ACCESS_KEY_ID -- AWS Access Key ID
    # AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
    argv.pop(0)
    if len(argv) != 4:
        raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
    # friendly_name and key are read as globals by download()/upload()
    mode, friendly_name, dependencies_file, directory = argv
    conn = S3Connection()
    # validate=False avoids an extra round-trip; a missing bucket shows up as None
    bucket = conn.lookup(BUCKET_NAME, validate=False)
    if bucket is None:
        raise SystemExit("Could not access bucket!")
    # Cache entries are content-addressed: the S3 key is the SHA-256 of the
    # dependencies file, so any dependency change maps to a new cache entry.
    dependencies_file_hash = _sha256_of_file(dependencies_file)
    key = Key(bucket, dependencies_file_hash)
    key.storage_class = 'REDUCED_REDUNDANCY'
    if mode == 'download':
        download(directory)
    elif mode == 'upload':
        # only upload when a previous download left the marker behind
        if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
            upload(directory)
        else:
            print("No need to upload anything.")
    else:
        raise SystemExit("Unrecognized mode {!r}".format(mode))
| mit |
RandyLowery/erpnext | erpnext/patches/v5_0/index_on_account_and_gl_entry.py | 60 | 1275 | from __future__ import unicode_literals
import frappe
def execute():
    """Rebuild the expected indexes on the accounting doctypes.

    For each doctype: drop leftover indexes on columns that should no longer
    be indexed, then add any of the desired single-column indexes that are
    not already present.  Runs raw DDL through frappe.db.
    """
    index_map = {
        "Account": ["parent_account", "lft", "rgt"],
        "GL Entry": ["posting_date", "account", 'party', "voucher_no"],
        "Sales Invoice": ["posting_date", "debit_to", "customer"],
        "Purchase Invoice": ["posting_date", "credit_to", "supplier"]
    }

    # columns whose old indexes should be removed
    obsolete_columns = ("parent", "group_or_ledger", "is_group", "is_pl_account", "debit_or_credit",
        "account_name", "company", "project", "voucher_date", "due_date", "bill_no",
        "bill_date", "is_opening", "fiscal_year", "outstanding_amount")

    for dt, indexes in index_map.items():
        existing_indexes = [(d.Key_name, d.Column_name) for d in frappe.db.sql("""show index from `tab{0}`
            where Column_name != 'name'""".format(dt), as_dict=1)]

        # SHOW INDEX emits one row per column of a composite index, so the
        # same Key_name can appear several times; drop each index only once
        # to avoid "can't DROP" errors on the second attempt.
        dropped = set()
        for old, column in existing_indexes:
            if column in obsolete_columns and old not in dropped:
                frappe.db.sql("alter table `tab{0}` drop index {1}".format(dt, old))
                dropped.add(old)

        # re-read after the drops, then add whatever is still missing
        existing_indexes = [(d.Key_name, d.Column_name) for d in frappe.db.sql("""show index from `tab{0}`
            where Column_name != 'name'""".format(dt), as_dict=1)]
        existing_indexed_columns = {x[1] for x in existing_indexes}

        for new in indexes:
            if new not in existing_indexed_columns:
                frappe.db.sql("alter table `tab{0}` add index ({1})".format(dt, new))
withrocks/arteria-siswrap | tests/unit/siswrap_handlers_tests.py | 1 | 4576 | import pytest
import tornado.web
import jsonpickle
from arteria import *
from arteria.configuration import ConfigurationService
from siswrap.app import *
from siswrap.handlers import *
from siswrap.wrapper_services import *
# Some unit tests for siswrap.handlers
API_URL = "/api/1.0"
@pytest.fixture
def app():
    """Build a minimal tornado Application wired like the siswrap service."""
    config = ConfigurationService(app_config_path="./config/app.config")
    procs = ProcessService(config)
    handler_args = dict(process_svc=procs, config_svc=config)
    # same two routes the real service exposes, in debug mode
    return tornado.web.Application([
        (r"/api/1.0/(?:qc|report)/run/([\w_-]+)", RunHandler, handler_args),
        (r"/api/1.0/(?:qc|report)/status/(\d*)", StatusHandler, handler_args)
    ], debug=True)
@pytest.fixture
def stub_isdir(monkeypatch):
    """Patch os.path.isdir so every runfolder path appears to exist."""
    monkeypatch.setattr("os.path.isdir", lambda path: True)
def json(payload):
    # Shorthand used by the tests to serialize request bodies.
    # NOTE(review): the name shadows the stdlib ``json`` module name; harmless
    # here since that module is never imported in this file.
    return jsonpickle.encode(payload)
class TestRunHandler(object):
    """HTTP-level tests for the /run endpoint."""
    @pytest.mark.gen_test
    def test_post_job(self, http_client, http_server, base_url, stub_isdir):
        # POST a runfolder name; the handler should accept the job (202) and
        # echo back the resolved monitored path in the JSON body.
        payload = {"runfolder": "foo"}
        resp = yield http_client.fetch(base_url + API_URL + "/report/run/123",
                                       method="POST", body=json(payload))
        assert resp.code == 202
        payload = jsonpickle.decode(resp.body)
        assert payload["runfolder"] == "/data/testarteria1/mon1/foo"
class TestStatusHandler(object):
    """HTTP-level tests for the /status endpoint, stubbing ProcessService."""
    @pytest.mark.gen_test
    def test_get_global_status(self, http_client, http_server,
                               base_url, monkeypatch, stub_isdir):
        # With no tracked processes, both wrapper types return an empty JSON list.
        def my_get_all(self, wrapper_type):
            return []
        monkeypatch.setattr("siswrap.wrapper_services.ProcessService.get_all",
                            my_get_all)
        resp = yield http_client.fetch(base_url + API_URL + "/report/status/")
        assert resp.code == 200
        assert len(resp.body) == 2 # [ and ]
        resp = yield http_client.fetch(base_url + API_URL + "/qc/status/")
        assert resp.code == 200
        assert len(resp.body) == 2
    @pytest.mark.gen_test
    def test_get_global_filled_status(self, http_client, http_server,
                                      base_url, monkeypatch, stub_isdir):
        # Stub get_all with canned PIDs per wrapper type; the report list
        # must be passed through to the response unmodified and in order.
        def my_get_all(self, wrapper_type):
            if wrapper_type == "report":
                return [{"pid": 4242}, {"pid": 3131}]
            elif wrapper_type == "qc":
                return [{"pid": 2424}, {"pid": 1313}]
        monkeypatch.setattr("siswrap.wrapper_services.ProcessService.get_all",
                            my_get_all)
        resp = yield http_client.fetch(base_url + API_URL + "/report/status/")
        assert resp.code == 200
        payload = jsonpickle.decode(resp.body)
        assert payload[0]["pid"] == 4242
        assert payload[1]["pid"] == 3131
    @pytest.mark.gen_test
    def test_get_existing_status(self, http_client, http_server,
                                 base_url, monkeypatch, stub_isdir):
        # A known pid should come back with its state; works for both routes.
        def my_get(self, pid, wrapper_type):
            return ProcessInfo(runfolder="foo", host="bar",
                               state=State.STARTED,
                               proc=None, msg=None, pid=pid)
        monkeypatch.setattr("siswrap.wrapper_services.ProcessService.get_status",
                            my_get)
        resp = yield http_client.fetch(base_url + API_URL +
                                       "/report/status/123")
        assert resp.code == 200
        payload = jsonpickle.decode(resp.body)
        assert payload["pid"] == 123
        assert payload["state"] == State.STARTED
        resp = yield http_client.fetch(base_url + API_URL + "/qc/status/321")
        assert resp.code == 200
        payload = jsonpickle.decode(resp.body)
        assert payload["pid"] == 321
    @pytest.mark.gen_test
    def test_get_invalid_status(self, http_client, http_server,
                                base_url, monkeypatch, stub_isdir):
        # An unknown process (state NONE) should surface as an HTTP error.
        def my_get(self, pid, wrapper_type):
            return ProcessInfo(runfolder=None, host="foobar",
                               state=State.NONE,
                               proc=None, msg=None, pid=pid)
        monkeypatch.setattr("siswrap.wrapper_services.ProcessService.get_status",
                            my_get)
        with pytest.raises(Exception) as err:
            resp = yield http_client.fetch(base_url + API_URL +
                                           "/report/status/123")
            assert resp.code == 500
if __name__ == '__main__':
    # Allow running this module directly: delegate to pytest's collector.
    pytest.main()
| mit |
romain-li/edx-platform | cms/djangoapps/maintenance/views.py | 16 | 7585 | """
Views for the maintenance app.
"""
import logging
from django.db import transaction
from django.core.validators import ValidationError
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import View
from edxmako.shortcuts import render_to_response
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from contentstore.management.commands.utils import get_course_versions
from util.json_request import JsonResponse
from util.views import require_global_staff
log = logging.getLogger(__name__)
# Registry of all the views exposed by the Maintenance app; the index page
# iterates this dict, and each entry's 'url' is reversed to build its link.
MAINTENANCE_VIEWS = {
    'force_publish_course': {
        'url': 'maintenance:force_publish_course',
        'name': _('Force Publish Course'),
        'slug': 'force_publish_course',
        'description': _(
            'Sometimes the draft and published branches of a course can get out of sync. Force publish course command '
            'resets the published branch of a course to point to the draft branch, effectively force publishing the '
            'course. This view dry runs the force publish command'
        ),
    },
}
# User-facing validation messages shared by validate_course_key and its callers.
COURSE_KEY_ERROR_MESSAGES = {
    'empty_course_key': _('Please provide course id.'),
    'invalid_course_key': _('Invalid course key.'),
    'course_key_not_found': _('No matching course found.')
}
class MaintenanceIndexView(View):
    """
    Index view for maintenance dashboard, used by global staff.
    This view lists some commands/tasks that can be used to dry run or execute directly.
    """
    @method_decorator(require_global_staff)
    def get(self, request):
        """Render the maintenance index view. """
        # Template renders one entry per item in the MAINTENANCE_VIEWS registry.
        return render_to_response('maintenance/index.html', {
            'views': MAINTENANCE_VIEWS,
        })
class MaintenanceBaseView(View):
    """
    Base class for Maintenance views.

    Holds a shared template context dict and common helpers for rendering
    and course-key validation; subclasses extend ``self.context`` in their
    own ``__init__``.
    """
    template = 'maintenance/container.html'
    def __init__(self, view=None):
        # view: the MAINTENANCE_VIEWS entry describing the concrete subclass
        self.context = {
            'view': view if view else '',
            'form_data': {},
            'error': False,
            'msg': ''
        }
    def render_response(self):
        """
        Render the current context: JSON for AJAX callers, HTML otherwise.
        """
        if self.request.is_ajax():
            return JsonResponse(self.context)
        return render_to_response(self.template, self.context)
    @method_decorator(require_global_staff)
    def get(self, request):
        """
        Render get view.
        """
        return self.render_response()
    def validate_course_key(self, course_key, branch=ModuleStoreEnum.BranchName.draft):
        """
        Validates the course_key that would be used by maintenance app views.
        Arguments:
            course_key (string): a course key
            branch: a course locator branch, default value is ModuleStoreEnum.BranchName.draft .
                values can be either ModuleStoreEnum.BranchName.draft or ModuleStoreEnum.BranchName.published.
        Returns:
            course_usage_key (CourseLocator): course usage locator
        Raises:
            ValidationError: when course_key is empty.
            InvalidKeyError: (from CourseKey.from_string) when malformed.
            ItemNotFoundError: when no course exists for the key.
        """
        if not course_key:
            raise ValidationError(COURSE_KEY_ERROR_MESSAGES['empty_course_key'])
        course_usage_key = CourseKey.from_string(course_key)
        if not modulestore().has_course(course_usage_key):
            raise ItemNotFoundError(COURSE_KEY_ERROR_MESSAGES['course_key_not_found'])
        # get branch specific locator
        course_usage_key = course_usage_key.for_branch(branch)
        return course_usage_key
class ForcePublishCourseView(MaintenanceBaseView):
    """
    View for force publishing state of the course, used by the global staff.
    This view uses `force_publish_course` method of modulestore which publishes the draft state of the course. After
    the course has been forced published, both draft and publish draft point to same location.
    """
    def __init__(self):
        super(ForcePublishCourseView, self).__init__(MAINTENANCE_VIEWS['force_publish_course'])
        # extra context consumed by the force-publish template
        self.context.update({
            'current_versions': [],
            'updated_versions': [],
            'form_data': {
                'course_id': '',
                'is_dry_run': True
            }
        })
    def get_course_branch_versions(self, versions):
        """
        Returns a dict containing unicoded values of draft and published draft versions.
        """
        return {
            'draft-branch': unicode(versions['draft-branch']),
            'published-branch': unicode(versions['published-branch'])
        }
    @transaction.atomic
    @method_decorator(require_global_staff)
    def post(self, request):
        """
        This method force publishes a course if dry-run argument is not selected. If dry-run is selected, this view
        shows possible outcome if the `force_publish_course` modulestore method is executed.
        Arguments:
            course_id (string): a request parameter containing course id
            is_dry_run (string): a request parameter containing dry run value.
                It is obtained from checkbox so it has either values 'on' or ''.
        """
        course_id = request.POST.get('course-id')
        self.context.update({
            'form_data': {
                'course_id': course_id
            }
        })
        # Validation failures set context['error'] and fall through to a
        # single render below instead of raising to the client.
        try:
            course_usage_key = self.validate_course_key(course_id)
        except InvalidKeyError:
            self.context['error'] = True
            self.context['msg'] = COURSE_KEY_ERROR_MESSAGES['invalid_course_key']
        except ItemNotFoundError as exc:
            self.context['error'] = True
            self.context['msg'] = exc.message
        except ValidationError as exc:
            self.context['error'] = True
            self.context['msg'] = exc.message
        if self.context['error']:
            return self.render_response()
        source_store = modulestore()._get_modulestore_for_courselike(course_usage_key)  # pylint: disable=protected-access
        # force_publish_course only exists on split-mongo stores
        if not hasattr(source_store, 'force_publish_course'):
            self.context['msg'] = _('Force publishing course is not supported with old mongo courses.')
            log.warning(
                'Force publishing course is not supported with old mongo courses. \
                %s attempted to force publish the course %s.',
                request.user,
                course_id,
                exc_info=True
            )
            return self.render_response()
        current_versions = self.get_course_branch_versions(get_course_versions(course_id))
        # if publish and draft are NOT different
        if current_versions['published-branch'] == current_versions['draft-branch']:
            self.context['msg'] = _('Course is already in published state.')
            log.warning(
                'Course is already in published state. %s attempted to force publish the course %s.',
                request.user,
                course_id,
                exc_info=True
            )
            return self.render_response()
        # dry run: expose the current branch versions without publishing
        self.context['current_versions'] = current_versions
        log.info(
            '%s dry ran force publish the course %s.',
            request.user,
            course_id,
            exc_info=True
        )
        return self.render_response()
| agpl-3.0 |
ingokegel/intellij-community | python/helpers/py2only/docutils/parsers/rst/languages/pt_br.py | 128 | 3992 | # $Id: pt_br.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Brazilian Portuguese-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Mapping of Brazilian Portuguese directive names (accented characters are
# written as unicode escapes) to the canonical directive names registered in
# docutils.parsers.rst.directives.  Entries still marked
# "(translation required)" keep the English name as a placeholder key.
directives = {
      # language-dependent: fixed
      u'aten\u00E7\u00E3o': 'attention',
      'cuidado': 'caution',
      u'code (translation required)': 'code',
      'perigo': 'danger',
      'erro': 'error',
      u'sugest\u00E3o': 'hint',
      'importante': 'important',
      'nota': 'note',
      'dica': 'tip',
      'aviso': 'warning',
      u'exorta\u00E7\u00E3o': 'admonition',
      'barra-lateral': 'sidebar',
      u't\u00F3pico': 'topic',
      'bloco-de-linhas': 'line-block',
      'literal-interpretado': 'parsed-literal',
      'rubrica': 'rubric',
      u'ep\u00EDgrafo': 'epigraph',
      'destaques': 'highlights',
      u'cita\u00E7\u00E3o-destacada': 'pull-quote',
      u'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      #'perguntas': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      u'table (translation required)': 'table',
      u'csv-table (translation required)': 'csv-table',
      u'list-table (translation required)': 'list-table',
      'meta': 'meta',
      'math (translation required)': 'math',
      #'imagemap': 'imagemap',
      'imagem': 'image',
      'figura': 'figure',
      u'inclus\u00E3o': 'include',
      'cru': 'raw',
      u'substitui\u00E7\u00E3o': 'replace',
      'unicode': 'unicode',
      'data': 'date',
      'classe': 'class',
      'role (translation required)': 'role',
      u'default-role (translation required)': 'default-role',
      u'title (translation required)': 'title',
      u'\u00EDndice': 'contents',
      'numsec': 'sectnum',
      u'numera\u00E7\u00E3o-de-se\u00E7\u00F5es': 'sectnum',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      #u'notas-de-rorap\u00E9': 'footnotes',
      #u'cita\u00E7\u00F5es': 'citations',
      u'links-no-rodap\u00E9': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Brazilian Portuguese name to registered (in directives/__init__.py)
directive name mapping."""
# Mapping of Brazilian Portuguese interpreted-text role names (and their
# short aliases) to canonical docutils role names.
roles = {
    # language-dependent: fixed
    u'abbrevia\u00E7\u00E3o': 'abbreviation',
    'ab': 'abbreviation',
    u'acr\u00F4nimo': 'acronym',
    'ac': 'acronym',
    u'code (translation required)': 'code',
    u'\u00EDndice-remissivo': 'index',
    'i': 'index',
    'subscrito': 'subscript',
    'sub': 'subscript',
    'sobrescrito': 'superscript',
    'sob': 'superscript',
    u'refer\u00EAncia-a-t\u00EDtulo': 'title-reference',
    u't\u00EDtulo': 'title-reference',
    't': 'title-reference',
    u'refer\u00EAncia-a-pep': 'pep-reference',
    'pep': 'pep-reference',
    u'refer\u00EAncia-a-rfc': 'rfc-reference',
    'rfc': 'rfc-reference',
    u'\u00EAnfase': 'emphasis',
    'forte': 'strong',
    'literal': 'literal',
    'math (translation required)': 'math', # translation required?
    u'refer\u00EAncia-por-nome': 'named-reference',
    u'refer\u00EAncia-an\u00F4nima': 'anonymous-reference',
    u'refer\u00EAncia-a-nota-de-rodap\u00E9': 'footnote-reference',
    u'refer\u00EAncia-a-cita\u00E7\u00E3o': 'citation-reference',
    u'refer\u00EAncia-a-substitui\u00E7\u00E3o': 'substitution-reference',
    'alvo': 'target',
    u'refer\u00EAncia-a-uri': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',
    'cru': 'raw',}
"""Mapping of Brazilian Portuguese role names to canonical role names
for interpreted text."""
| apache-2.0 |
vsoch/deepdive | examples/tutorial_example/step2-generic-features/experiment-reports/v00002/code/udf/ext_has_spouse.py | 90 | 2326 | #! /usr/bin/env python
import csv, os, sys
# The directory of this UDF file
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Load the spouse dictionary for distant supervision.
# A person can have multiple spouses
# NOTE(review): membership tests later compare *lowercased* mention text
# against these entries, so the KB names are presumably already lowercase --
# verify against data/spouses.tsv.
spouses = set()
married_people = set()
lines = open(BASE_DIR + '/../data/spouses.tsv').readlines()
for line in lines:
    name1, name2, relation = line.strip().split('\t')
    spouses.add((name1, name2)) # Add a spouse relation pair
    married_people.add(name1) # Record the person as married
    married_people.add(name2)
# Load relations of people that are not spouse
# The non-spouse KB lists incompatible relations, e.g. childrens, siblings, parents.
# Used below as negative evidence for distant supervision.
non_spouses = set()
lines = open(BASE_DIR + '/../data/non-spouses.tsv').readlines()
for line in lines:
    name1, name2, relation = line.strip().split('\t')
    non_spouses.add((name1, name2)) # Add a non-spouse relation pair
# For each input tuple (Python 2 script: note the print *statement* below).
# Input is TSV on stdin: sentence_id, person1 id/text, person2 id/text.
for row in sys.stdin:
    parts = row.strip().split('\t')
    sentence_id, p1_id, p1_text, p2_id, p2_text = parts
    p1_text = p1_text.strip()
    p2_text = p2_text.strip()
    p1_text_lower = p1_text.lower()
    p2_text_lower = p2_text.lower()
    # DS rule 1: true if they appear in spouse KB, false if they appear in non-spouse KB
    # '\N' is the PostgreSQL COPY escape for NULL, i.e. "unlabeled".
    is_true = '\N'
    if (p1_text_lower, p2_text_lower) in spouses or \
       (p2_text_lower, p1_text_lower) in spouses:
        is_true = '1'
    elif (p1_text_lower, p2_text_lower) in non_spouses or \
         (p2_text_lower, p1_text_lower) in non_spouses:
        is_true = '0'
    # DS rule 3: false if they appear to be in same person
    # (there is no rule 2 in this script; numbering follows the tutorial)
    elif (p1_text == p2_text) or (p1_text in p2_text) or (p2_text in p1_text):
        is_true = '0'
    # DS rule 4 false if they are both married, but not married to each other:
    elif p1_text_lower in married_people and p2_text_lower in married_people:
        is_true = '0'
    # Output relation candidates into output table
    print '\t'.join([
        p1_id, p2_id, sentence_id,
        "%s-%s" %(p1_text, p2_text),
        is_true,
        "%s-%s" %(p1_id, p2_id),
        '\N' # leave "id" blank for system!
    ])
# TABLE FORMAT: CREATE TABLE has_spouse(
# person1_id bigint,
# person2_id bigint,
# sentence_id bigint,
# description text,
# is_true boolean,
# relation_id bigint, -- unique identifier for has_spouse
# id bigint -- reserved for DeepDive
# );
juniorh/dummyDbGen | influxdb/genDocDb.influxdb_09.py | 1 | 4765 | #!/usr/bin/env python
# How to use
# >python genDocDb.influx.py -h localhost -P 8086 -u root -p password -d database -m measurement -n 1000 -t 1 -r default
import datetime
import logging
import argparse
import random
import time
import sys
from influxdb import InfluxDBClient
def get_args_parser():
    """Build the CLI parser for the dummy-point generator.

    add_help is disabled because -h is repurposed for --host; a manual
    --help flag is defined instead and checked in __main__.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        "-h", "--host",
        default="localhost",
        nargs='?',
        type=str,
        help="Connect to host.")
    parser.add_argument(
        "-P", "--port",
        default=8086,
        nargs='?',
        type=int,
        help="Port number to use for connection.")
    parser.add_argument(
        "-u", "--user",
        default=None,
        nargs='?',
        type=str,
        help="User for login if not current user.")
    parser.add_argument(
        "-p", "--password",
        default='',
        nargs='?',
        type=str,
        help="Password to use when connecting to server.")
    parser.add_argument(
        "-d", "--database",
        default=None,
        nargs='?',
        type=str,
        help="Select variable")
    parser.add_argument(
        "-xd", "--no-create-database",
        default=False,
        action='store_true',
        help="Don't create database")
    parser.add_argument(
        "-m", "--measurement",
        default=None,
        nargs='?',
        type=str,
        help="Select measurement")
    parser.add_argument(
        "-r", "--retention",
        default=None,
        nargs='?',
        type=str,
        help="Select retention policy")
    parser.add_argument(
        "--help",
        default=False,
        action='store_true',
        help="Show this help")
    parser.add_argument(
        "-v","--verbose",
        default=False,
        action='store_true',
        help="Print progress each 1s")
    # string defaults for -n/-t are fine: argparse applies type=int to
    # string defaults as if they came from the command line
    parser.add_argument(
        "-n", "--number",
        default='1',
        nargs='?',
        type=int,
        help="How much point dummies will be generated")
    parser.add_argument(
        "-t", "--time",
        default='1',
        nargs='?',
        type=int,
        help="Time range (in second) each point is generated")
    return parser
def genData(payload):
    """Fill *payload* in place with one randomized dummy point and return it.

    Picks one value from each of five tag pools (tagK draws from
    nTagK_1 .. nTagK_(3*K)), five numeric field values, and a random host
    name, writing them into payload['tags'] and
    payload['points'][0]['fields'].

    (The original hand-written pools contained copy-paste typos --
    e.g. 'nTag3_3' twice and no 'nTag3_2' -- which generating them fixes.)
    """
    tags = {}
    for k, pool_size in ((1, 3), (2, 6), (3, 9), (4, 12), (5, 15)):
        pool = ["nTag%d_%d" % (k, i) for i in range(1, pool_size + 1)]
        tags["tag%d" % k] = random.choice(pool)
    payload["tags"] = tags
    payload["points"][0]["fields"] = {
        "value1": random.randint(80, 100),
        "value2": random.randint(10, 20),
        "value3": random.randint(0, 50),
        "value4": random.randint(0, 100),
        "value5": random.random() * 10,
        "host": "server%d" % random.randint(0, 100),
    }
    return payload
#main -- Python 2 script: note the `except Exception, err` syntax and the
# print statements below; this block will not parse under Python 3.
if __name__ == '__main__':
    parser = get_args_parser()
    args = parser.parse_args()
    db = None
    # user/database/measurement are mandatory; --help is handled manually
    # because -h is repurposed for --host
    if args.help or not args.user or not args.database or not args.measurement:
        parser.print_help()
        parser.exit()
    try:
        db = InfluxDBClient(args.host,args.port,args.user,args.password)
        # cheap call to verify the connection/credentials up front
        db.get_list_database()
    except Exception, err:
        logging.exception(err)
        print err
        sys.exit()
    try:
        if not args.no_create_database:
            db.create_database(args.database)
    except:
        # best effort: database may already exist
        print "Error creating database"
    db.switch_database(args.database)
    #Generate dummy data
    templateData = {
        "database": args.database,
        "tags":{},
        "precision": "u",
        "points":[
            {
                "name": args.measurement,
                "fields": {}
            }
        ]}
    if args.retention:
        templateData["retentionPolicy"] = args.retention
    timeScale = 1000000 # us precision
    timeInterval = args.time * timeScale
    # NOTE(review): despite the name this is in *microseconds* (us precision)
    backMilliseconds = timeInterval * args.number
    # backdate the first point so the series ends roughly "now"
    startTime = int(datetime.datetime.now().strftime('%s%f')) - backMilliseconds
    print "Start at "+str(startTime)
    t = time.time()
    for i in range(0,backMilliseconds,timeInterval):
        dummyData = genData(templateData)
        dummyData["timestamp"] = startTime+i
        #print dummyData["timestamp"]
        #print dummyData
        db.write(dummyData)
        if args.verbose:
            # throttle progress output to at most once per second
            if time.time() - t > 1:
                t = time.time()
                print "rtDummy "+str((i/timeScale)/args.time+1)
    print "FINISH"
ceefour/opencog | tests/python/old/test_tree.py | 48 | 7173 | from unittest import TestCase
# Prefer the native opencog bindings; fall back to the remote-shell
# atomspace client when they are not installed.
try:
    from opencog.atomspace import AtomSpace, TruthValue, Atom #, Handle
    from opencog.atomspace import types, get_type, get_type_name # is_a
except ImportError:
    from atomspace_remote import AtomSpace, TruthValue, Atom #, Handle
    from atomspace_remote import types, get_type, get_type_name # is_a
t=types
import tree
# run any doctests
import doctest
doctest.testmod(tree)
class TreeTest(TestCase):
    def setUp(self):
        # Fresh AtomSpace per test: two ConceptNodes, a Link over both, and
        # a second Link nesting the first.  (Python 2: print statement.)
        self.a = AtomSpace()
        self.x1 = self.a.add(t.ConceptNode,"test1")
        self.x2 = self.a.add(t.ConceptNode,"test2")
        self.l1 = self.a.add(t.Link, out=[self.x1,self.x2])
        self.l2 = self.a.add(t.Link, out=[self.l1,self.x2])
        print 'l1', self.l1
    def tearDown(self):
        # Drop the AtomSpace so atoms cannot leak between tests.
        del self.a
    def test_atom_tree(self):
        # A bare node converts to a leaf tree.
        node_tree = tree.tree_from_atom(self.x1)
        self.assertEquals(node_tree.is_leaf(), True)
    def test_link_tree(self):
        # A link converts to a non-leaf tree whose tuple form has an op
        # plus its two outgoing atoms.
        l_tree = tree.tree_from_atom(self.l1)
        self.assertEquals(l_tree.is_leaf(), False)
        # should be something like ('Link', 17, 18)
        x = l_tree.to_tuple()
        self.assertEquals(len(x), 3 )
    def test_link_to_link_tree(self):
        # Nested links convert recursively, and the shared atom (x2) shows
        # up as the same element in both levels of the tuple.
        l_tree = tree.tree_from_atom(self.l2)
        self.assertEquals(l_tree.is_leaf(), False)
        # should be something like ('Link', ('Link', 13, 14), 14)
        x = l_tree.to_tuple()
        self.assertEquals(len(x), 3)
        self.assertEquals(len(x[1]), 3)
        self.assertEquals(x[1][2], x[2])
    def test_compare(self):
        # Trees define a total ordering; the shallower l1 sorts before l2.
        l_tree1 = tree.tree_from_atom(self.l1)
        l_tree = tree.tree_from_atom(self.l2)
        self.assertEquals(l_tree1 > l_tree, False)
        self.assertEquals(l_tree1 < l_tree, True)
    def test_coerce_tree(self):
        # coerce_tree passes trees through, wraps atoms, and turns plain
        # values into leaf trees with that value as the op.
        node_tree = tree.tree_from_atom(self.x1)
        print str(node_tree)
        self.assertEquals(tree.coerce_tree(node_tree),node_tree)
        self.assertEquals(tree.coerce_tree(self.x1),node_tree)
        self.assertEquals(tree.coerce_tree("tree").op,"tree")
    def test_is_variable(self):
        # Var trees are variables; atom-backed trees are not.
        var_tree = tree.Var(1)
        self.assertEquals(var_tree.is_variable(),True)
        node_tree = tree.T(self.x1)
        self.assertEquals(node_tree.is_variable(),False)
    def test_unify(self):
        """Exercise tree.unify: matches, mismatches and variable bindings."""
        T = tree.T
        V = tree.Var
        # identical ground trees unify with an empty substitution
        x1_template = T(self.x1)
        x1_tree = tree.tree_from_atom(self.x1)
        s = tree.unify(x1_template, x1_tree, {})
        self.assertEquals(s, {})
        # different ground trees do not unify
        x2_template = T(self.x2)
        s = tree.unify(x2_template, x1_tree, {})
        self.assertEquals(s, None)
        # a bare variable unifies with anything, binding to the whole tree
        all_template = V(1)
        l2_tree = tree.tree_from_atom(self.l2)
        s = tree.unify(all_template, l2_tree, {})
        s_correct = {all_template : l2_tree}
        self.assertEquals(s, s_correct)
        # variable-variable unification binds left to right
        t1 = V(1)
        t2 = V(2)
        s = tree.unify(t1, t2, {})
        self.assertEquals(s, {V(1):V(2)})
        # an existing consistent substitution is preserved
        t1 = V(1)
        t2 = V(2)
        s_correct = {V(1):V(2)}
        s = tree.unify(t1, t2, s_correct)
        self.assertEquals(s, s_correct)
        # unification recurses into compound trees with matching ops
        t1 = T('blah',V(1))
        t2 = T('blah',V(2))
        s = tree.unify(t1, t2, {})
        self.assertEquals(s, {V(1):V(2)})
        t1 = T('blah',V(1), V(2))
        t2 = T('blah',V(3), V(4))
        s = tree.unify(t1, t2, {})
        self.assertEquals(s, {V(1):V(3), V(2):V(4)})
        # a repeated variable on both sides yields a single binding
        t1 = T('blah',V(1), V(1))
        t2 = T('blah',V(2), V(2))
        s = tree.unify(t1, t2, {})
        self.assertEquals(s, {V(1):V(2)})
def test_find_conj(self):
conj = (tree.tree_from_atom(self.l1), tree.tree_from_atom(self.l2))
matches = tree.find_conj(conj, self.a.get_atoms_by_type(t.Atom))
self.assertEquals(len(matches), 1)
if len(matches) == 1:
first = matches[0]
self.assertEquals(first.subst, {})
self.assertEquals(first.atoms, [self.l1, self.l2])
    # Test whether find_conj can be used to find atoms for Psi Rules. That is not actually done in the code, but could be useful as an alternative approach.
    # (This may be obsolete; an even better approach would be to use find_matching_conjunctions)
    def test_find_conj2(self):
        # Smoke test only: builds a ground Psi-Rule-style conjunction and runs
        # find_conj over it. There are no assertions -- it only verifies that
        # no exception is raised.
        a = self.a
        conj = (
            a.add(t.AtTimeLink, out=[a.add(t.TimeNode, '11210347010'), a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'increased'), a.add(t.ListLink, out=[a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'EnergyDemandGoal'), a.add(t.ListLink, out=[])])])])]),
            a.add(t.AtTimeLink, out=[a.add(t.TimeNode, '11210347000'), a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'actionDone'), a.add(t.ListLink, out=[a.add(t.ExecutionLink, out=[a.add(t.GroundedSchemaNode, 'eat'), a.add(t.ListLink, out=[a.add(t.AccessoryNode, 'id_-54646')])])])])]),
            a.add(t.SequentialAndLink, out=[a.add(t.TimeNode, '11210347000'), a.add(t.TimeNode, '11210347010')])
        )
        conj = tuple(map(tree.tree_from_atom, conj))
        # result intentionally unused; see note above
        res = tree.find_conj(conj,a.get_atoms_by_type(t.Atom))
    def test_find_conj3(self):
        # Smoke test only: like test_find_conj2 but with fresh variables in
        # place of the time nodes, action and goal. No assertions -- it only
        # verifies that find_conj handles a non-ground conjunction without
        # raising.
        a = self.a
        t1 = tree.atom_from_tree(tree.new_var(), a)
        t2 = tree.atom_from_tree(tree.new_var(), a)
        action = tree.atom_from_tree(tree.new_var(), a)
        goal = tree.atom_from_tree(tree.new_var(), a)
        conj = (
            a.add(t.AtTimeLink, out=[t1, a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'actionDone'), action])]),
            a.add(t.AtTimeLink, out=[t2, a.add(t.EvaluationLink, out=[a.add(t.PredicateNode, 'increased'), a.add(t.ListLink, out=[a.add(t.EvaluationLink, out=[goal, a.add(t.ListLink, out=[])])])])]),
            a.add(t.SequentialAndLink, out=[a.add(t.TimeNode, '11210347000'), a.add(t.TimeNode, '11210347010')])
        )
        conj = tuple(map(tree.tree_from_atom, conj))
        # result intentionally unused; smoke test
        res = tree.find_conj(conj,a.get_atoms_by_type(t.Atom))
def test_apply_rule(self):
atoms = [self.l1, self.l2]
# This is supposed to look up all Atoms of (exactly) type 'Link', and return their first outgoing atom
link_template = tree.T('Link', 1, 2)
first = tree.Var(1)
result_trees = tree.apply_rule(link_template, first, atoms)
result_correct = map(tree.tree_from_atom, [self.x1, self.l1])
self.assertEquals(result_trees, result_correct)
def test_standardize_apart(self):
var1, var2 = tree.Var(1), tree.Var(2)
tr1 = tree.T('ListLink', var1, var2)
tr2 = tree.standardize_apart(tr1)
print tr1
print tr2
self.assertNotEquals(tree.unify(tr1, tr2, {}), None)
var1_new, var2_new = tr2.args
self.assertNotEquals(var1_new, var2_new)
assert var1_new not in [var1, var2]
assert var2_new not in [var1, var2]
def test_canonical_trees(self):
conj = (
tree.T('ListLink', 1, 2),
tree.T('ListLink', 2, 3)
)
canon = tree.canonical_trees(conj)
print canon
| agpl-3.0 |
navodissa/python-flask | flask/lib/python2.7/site-packages/whoosh/qparser/default.py | 37 | 16940 | # Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import sys
from whoosh import query
from whoosh.compat import text_type
from whoosh.qparser import syntax
from whoosh.qparser.common import print_debug, QueryParserError
# Query parser object
class QueryParser(object):
    """A hand-written query parser built on modular plug-ins. The default
    configuration implements a powerful fielded query language similar to
    Lucene's.

    You can use the ``plugins`` argument when creating the object to override
    the default list of plug-ins, and/or use ``add_plugin()`` and/or
    ``remove_plugin_class()`` to change the plug-ins included in the parser.

    >>> from whoosh import qparser
    >>> parser = qparser.QueryParser("content", schema)
    >>> parser.remove_plugin_class(qparser.WildcardPlugin)
    >>> parser.add_plugin(qparser.PrefixPlugin())
    >>> parser.parse(u"hello there")
    And([Term("content", u"hello"), Term("content", u"there")])
    """

    def __init__(self, fieldname, schema, plugins=None, termclass=query.Term,
                 phraseclass=query.Phrase, group=syntax.AndGroup):
        """
        :param fieldname: the default field -- the parser uses this as the
            field for any terms without an explicit field.
        :param schema: a :class:`whoosh.fields.Schema` object to use when
            parsing. The appropriate fields in the schema will be used to
            tokenize terms/phrases before they are turned into query objects.
            You can specify None for the schema to create a parser that does
            not analyze the text of the query, usually for testing purposes.
        :param plugins: a list of plugins to use. WhitespacePlugin is
            automatically included, do not put it in this list. This overrides
            the default list of plugins. Classes in the list will be
            automatically instantiated.
        :param termclass: the query class to use for individual search terms.
            The default is :class:`whoosh.query.Term`.
        :param phraseclass: the query class to use for phrases. The default
            is :class:`whoosh.query.Phrase`.
        :param group: the default grouping. ``AndGroup`` makes terms required
            by default. ``OrGroup`` makes terms optional by default.
        """
        self.fieldname = fieldname
        self.schema = schema
        self.termclass = termclass
        self.phraseclass = phraseclass
        self.group = group
        self.plugins = []
        if not plugins:
            plugins = self.default_set()
        # The whitespace plugin is always required, so it is added
        # unconditionally before the user-supplied/default plugins.
        self._add_ws_plugin()
        self.add_plugins(plugins)

    def default_set(self):
        """Returns the default list of plugins to use.
        """
        from whoosh.qparser import plugins
        return [plugins.WhitespacePlugin(),
                plugins.SingleQuotePlugin(),
                plugins.FieldsPlugin(),
                plugins.WildcardPlugin(),
                plugins.PhrasePlugin(),
                plugins.RangePlugin(),
                plugins.GroupPlugin(),
                plugins.OperatorsPlugin(),
                plugins.BoostPlugin(),
                plugins.EveryPlugin(),
                ]

    def add_plugins(self, pins):
        """Adds the given list of plugins to the list of plugins in this
        parser.
        """
        for pin in pins:
            self.add_plugin(pin)

    def add_plugin(self, pin):
        """Adds the given plugin to the list of plugins in this parser.
        """
        # Accept either a plugin class or an instance; instantiate classes
        if isinstance(pin, type):
            pin = pin()
        self.plugins.append(pin)

    def _add_ws_plugin(self):
        # Whitespace handling is mandatory for tagging to work at all
        from whoosh.qparser.plugins import WhitespacePlugin
        self.add_plugin(WhitespacePlugin())

    def remove_plugin(self, pi):
        """Removes the given plugin object from the list of plugins in this
        parser.
        """
        self.plugins.remove(pi)

    def remove_plugin_class(self, cls):
        """Removes any plugins of the given class from this parser.
        """
        self.plugins = [pi for pi in self.plugins if not isinstance(pi, cls)]

    def replace_plugin(self, plugin):
        """Removes any plugins of the class of the given plugin and then adds
        it. This is a convenience method to keep from having to call
        ``remove_plugin_class`` followed by ``add_plugin`` each time you want
        to reconfigure a default plugin.

        >>> qp = qparser.QueryParser("content", schema)
        >>> qp.replace_plugin(qparser.NotPlugin("(^| )-"))
        """
        self.remove_plugin_class(plugin.__class__)
        self.add_plugin(plugin)

    def _priorized(self, methodname):
        # methodname is "taggers" or "filters". Returns a prioritized list of
        # tagger objects or filter functions.
        items_and_priorities = []
        for plugin in self.plugins:
            # Call either .taggers() or .filters() on the plugin
            method = getattr(plugin, methodname)
            for item in method(self):
                items_and_priorities.append(item)
        # Sort the list by priority (lower priority runs first)
        items_and_priorities.sort(key=lambda x: x[1])
        # Return the sorted list without the priorities
        return [item for item, _ in items_and_priorities]

    def multitoken_query(self, spec, texts, fieldname, termclass, boost):
        """Returns a query for multiple texts. This method implements the
        intention specified in the field's ``multitoken_query`` attribute,
        which specifies what to do when strings that look like single terms
        to the parser turn out to yield multiple tokens when analyzed.

        :param spec: a string describing how to join the text strings into a
            query. This is usually the value of the field's
            ``multitoken_query`` attribute.
        :param texts: a list of token strings.
        :param fieldname: the name of the field.
        :param termclass: the query class to use for single terms.
        :param boost: the original term's boost in the query string, should be
            applied to the returned query object.
        """
        spec = spec.lower()
        if spec == "first":
            # Throw away all but the first token
            return termclass(fieldname, texts[0], boost=boost)
        elif spec == "phrase":
            # Turn the token into a phrase
            return self.phraseclass(fieldname, texts, boost=boost)
        else:
            if spec == "default":
                qclass = self.group.qclass
            elif spec == "and":
                qclass = query.And
            elif spec == "or":
                qclass = query.Or
            else:
                raise QueryParserError("Unknown multitoken_query value %r"
                                       % spec)
            return qclass([termclass(fieldname, t, boost=boost)
                           for t in texts])

    def term_query(self, fieldname, text, termclass, boost=1.0, tokenize=True,
                   removestops=True):
        """Returns the appropriate query object for a single term in the query
        string.
        """
        if self.schema and fieldname in self.schema:
            field = self.schema[fieldname]

            # If this field type wants to parse queries itself, let it do so
            # and return early
            if field.self_parsing():
                try:
                    q = field.parse_query(fieldname, text, boost=boost)
                    return q
                except Exception:
                    # Narrowed from a bare except: (which would also swallow
                    # KeyboardInterrupt/SystemExit). The sys.exc_info() idiom
                    # is kept for old-Python syntax compatibility.
                    e = sys.exc_info()[1]
                    return query.error_query(e)

            # Otherwise, ask the field to process the text into a list of
            # tokenized strings
            texts = list(field.process_text(text, mode="query",
                                            tokenize=tokenize,
                                            removestops=removestops))

            # If the analyzer returned more than one token, use the field's
            # multitoken_query attribute to decide what query class, if any, to
            # use to put the tokens together
            if len(texts) > 1:
                return self.multitoken_query(field.multitoken_query, texts,
                                             fieldname, termclass, boost)

            # It's possible field.process_text() will return an empty list (for
            # example, on a stop word)
            if not texts:
                return None

            text = texts[0]

        return termclass(fieldname, text, boost=boost)

    def taggers(self):
        """Returns a priorized list of tagger objects provided by the parser's
        currently configured plugins.
        """
        return self._priorized("taggers")

    def filters(self):
        """Returns a priorized list of filter functions provided by the
        parser's currently configured plugins.
        """
        return self._priorized("filters")

    def tag(self, text, pos=0, debug=False):
        """Returns a group of syntax nodes corresponding to the given text,
        created by matching the Taggers provided by the parser's plugins.

        :param text: the text to tag.
        :param pos: the position in the text to start tagging at.
        """
        # The list of output tags
        stack = []
        # End position of the previous match
        prev = pos
        # Priorized list of taggers provided by the parser's plugins
        taggers = self.taggers()
        if debug:
            print_debug(debug, "Taggers: %r" % taggers)

        # Define a function that will make a WordNode from the "interstitial"
        # text between matches
        def inter(startchar, endchar):
            n = syntax.WordNode(text[startchar:endchar])
            n.startchar = startchar
            n.endchar = endchar
            return n

        while pos < len(text):
            node = None
            # Try each tagger to see if it matches at the current position
            for tagger in taggers:
                node = tagger.match(self, text, pos)
                if node is not None:
                    if node.endchar <= pos:
                        raise Exception("Token %r did not move cursor forward."
                                        " (%r, %s)" % (tagger, text, pos))
                    if prev < pos:
                        # Emit the unmatched text before this match as a word
                        tween = inter(prev, pos)
                        if debug:
                            print_debug(debug, "Tween: %r" % tween)
                        stack.append(tween)

                    if debug:
                        print_debug(debug, "Tagger: %r at %s: %r"
                                    % (tagger, pos, node))
                    stack.append(node)
                    prev = pos = node.endchar
                    break

            if not node:
                # No taggers matched, move forward
                pos += 1

        # If there's unmatched text left over on the end, put it in a WordNode
        if prev < len(text):
            stack.append(inter(prev, len(text)))

        # Wrap the list of nodes in a group node
        group = self.group(stack)
        if debug:
            print_debug(debug, "Tagged group: %r" % group)
        return group

    def filterize(self, nodes, debug=False):
        """Takes a group of nodes and runs the filters provided by the parser's
        plugins.
        """
        # Call each filter in the priorized list of plugin filters
        if debug:
            print_debug(debug, "Pre-filtered group: %r" % nodes)
        for f in self.filters():
            if debug:
                print_debug(debug, "..Applying: %r" % f)
            nodes = f(self, nodes)
            if debug:
                print_debug(debug, "..Result: %r" % nodes)
            if nodes is None:
                raise Exception("Filter %r did not return anything" % f)
        return nodes

    def process(self, text, pos=0, debug=False):
        """Returns a group of syntax nodes corresponding to the given text,
        tagged by the plugin Taggers and filtered by the plugin filters.

        :param text: the text to tag.
        :param pos: the position in the text to start tagging at.
        """
        nodes = self.tag(text, pos=pos, debug=debug)
        nodes = self.filterize(nodes, debug=debug)
        return nodes

    def parse(self, text, normalize=True, debug=False):
        """Parses the input string and returns a :class:`whoosh.query.Query`
        object/tree.

        :param text: the unicode string to parse.
        :param normalize: whether to call normalize() on the query object/tree
            before returning it. This should be left on unless you're trying to
            debug the parser output.
        :rtype: :class:`whoosh.query.Query`
        """
        if not isinstance(text, text_type):
            # Byte strings are decoded with a lossless single-byte codec so
            # parsing never raises a UnicodeDecodeError
            text = text.decode("latin1")

        nodes = self.process(text, debug=debug)
        if debug:
            print_debug(debug, "Syntax tree: %r" % nodes)

        q = nodes.query(self)
        if not q:
            q = query.NullQuery
        if debug:
            print_debug(debug, "Pre-normalized query: %r" % q)

        if normalize:
            q = q.normalize()
            if debug:
                print_debug(debug, "Normalized query: %r" % q)
        return q

    def parse_(self, text, normalize=True):
        # Unused stub; retained only for backward compatibility with any
        # external callers.
        pass
# Premade parser configurations
def MultifieldParser(fieldnames, schema, fieldboosts=None, **kwargs):
    """Returns a QueryParser configured to search in multiple fields.

    Instead of assigning unfielded clauses to a default field, this parser
    transforms them into an OR clause that searches a list of fields. For
    example, if the list of multi-fields is "f1", "f2" and the query string is
    "hello there", the class will parse "(f1:hello OR f2:hello) (f1:there OR
    f2:there)". This is very useful when you have two textual fields (e.g.
    "title" and "content") you want to search by default.

    :param fieldnames: a list of field names to search.
    :param fieldboosts: an optional dictionary mapping field names to boosts.
    """
    from whoosh.qparser.plugins import MultifieldPlugin

    # No default field: the multifield plugin rewrites unfielded terms itself
    parser = QueryParser(None, schema, **kwargs)
    parser.add_plugin(MultifieldPlugin(fieldnames, fieldboosts=fieldboosts))
    return parser
def SimpleParser(fieldname, schema, **kwargs):
    """Returns a QueryParser configured to support only +, -, and phrase
    syntax.
    """
    from whoosh.qparser import plugins

    # A deliberately minimal plugin set: whitespace, +/- and phrases only
    simple_plugins = [plugins.WhitespacePlugin,
                      plugins.PlusMinusPlugin,
                      plugins.PhrasePlugin]
    return QueryParser(fieldname, schema, plugins=simple_plugins, **kwargs)
def DisMaxParser(fieldboosts, schema, tiebreak=0.0, **kwargs):
    """Returns a QueryParser configured to support only +, -, and phrase
    syntax, and which converts individual terms into DisjunctionMax queries
    across a set of fields.

    :param fieldboosts: a dictionary mapping field names to boosts.
    :param tiebreak: NOTE(review) -- accepted for API compatibility but not
        currently forwarded anywhere in this function; confirm whether it
        should be passed to the DisMax grouping.
    """
    from whoosh.qparser import plugins

    # Rewrite unfielded terms into a DisMax across the boosted fields
    multifield = plugins.MultifieldPlugin(list(fieldboosts.keys()),
                                          fieldboosts=fieldboosts,
                                          group=syntax.DisMaxGroup)
    dismax_plugins = [plugins.WhitespacePlugin,
                      plugins.PlusMinusPlugin,
                      plugins.PhrasePlugin,
                      multifield]
    return QueryParser(None, schema, plugins=dismax_plugins, **kwargs)
| bsd-3-clause |
qinzhaokun/incubator-eagle | eagle-external/eagle-ambari/lib/EAGLE/package/scripts/eagle_service_check.py | 21 | 1067 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resource_management import *
class EagleServiceCheck(Script):
  # Ambari custom "service check" command implementation for Eagle.
  def service_check(self,env):
    """Run the Eagle 'status' action as Ambari's service check."""
    import params
    env.set_params(params)
    import actions
    # NOTE(review): eagle_service_exec is not defined in this file; it is
    # presumably provided by the star-import from resource_management or was
    # meant to be called as actions.eagle_service_exec -- confirm.
    eagle_service_exec(action="status")
# Entry point: Ambari invokes this script directly to run the service check.
if __name__ == "__main__":
  EagleServiceCheck().execute()
| apache-2.0 |
KaranToor/MA450 | google-cloud-sdk/lib/third_party/requests/structures.py | 149 | 3017 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
from .compat import OrderedDict
try:
    # The abstract base classes moved to collections.abc in Python 3.3 and
    # were removed from the collections namespace in Python 3.10, so the old
    # collections.MutableMapping reference breaks on modern interpreters.
    from collections.abc import Mapping as _Mapping, MutableMapping as _MutableMapping
except ImportError:  # Python 2
    from collections import Mapping as _Mapping, MutableMapping as _MutableMapping


class CaseInsensitiveDict(_MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well as
    dict's ``copy``. Also provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original-cased key, value)
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, _Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # _store.values() is an iterable of (key, value) pairs, which the
        # MutableMapping-provided update() accepts directly
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """A dict subclass whose lookups fall through to instance attributes,
    returning ``None`` for names that were never set."""

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        self.name = name

    def __repr__(self):
        return "<lookup '{0}'>".format(self.name)

    def __getitem__(self, key):
        # Values live in the instance __dict__, not in the dict storage;
        # missing names deliberately yield None instead of raising KeyError.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| apache-2.0 |
spaceof7/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 2 | 12329 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
# Plot-based algorithms require the optional third-party "plotly" package;
# degrade gracefully when it is not installed.
try:
    import plotly  # NOQA
    hasPlotly = True
except ImportError:
    # Narrowed from a bare except: so that unrelated errors (KeyboardInterrupt,
    # SystemExit) are not silently treated as "plotly is missing".
    hasPlotly = False
from qgis.core import (QgsApplication,
QgsProcessingProvider)
from processing.script.ScriptUtils import ScriptUtils
from .QgisAlgorithm import QgisAlgorithm
from .AddTableField import AddTableField
from .Aggregate import Aggregate
from .Aspect import Aspect
from .BasicStatistics import BasicStatisticsForField
from .CheckValidity import CheckValidity
from .ConcaveHull import ConcaveHull
from .CreateAttributeIndex import CreateAttributeIndex
from .CreateConstantRaster import CreateConstantRaster
from .Datasources2Vrt import Datasources2Vrt
from .DefineProjection import DefineProjection
from .Delaunay import Delaunay
from .DeleteColumn import DeleteColumn
from .DeleteDuplicateGeometries import DeleteDuplicateGeometries
from .DeleteHoles import DeleteHoles
from .DensifyGeometries import DensifyGeometries
from .DensifyGeometriesInterval import DensifyGeometriesInterval
from .Difference import Difference
from .EliminateSelection import EliminateSelection
from .EquivalentNumField import EquivalentNumField
from .ExecuteSQL import ExecuteSQL
from .Explode import Explode
from .ExportGeometryInfo import ExportGeometryInfo
from .ExtendLines import ExtendLines
from .ExtentFromLayer import ExtentFromLayer
from .ExtractSpecificNodes import ExtractSpecificNodes
from .FieldPyculator import FieldsPyculator
from .FieldsCalculator import FieldsCalculator
from .FieldsMapper import FieldsMapper
from .FindProjection import FindProjection
from .FixedDistanceBuffer import FixedDistanceBuffer
from .GeometryConvert import GeometryConvert
from .GeometryByExpression import GeometryByExpression
from .GridLine import GridLine
from .GridPolygon import GridPolygon
from .Heatmap import Heatmap
from .Hillshade import Hillshade
from .HubDistanceLines import HubDistanceLines
from .HubDistancePoints import HubDistancePoints
from .HypsometricCurves import HypsometricCurves
from .IdwInterpolation import IdwInterpolation
from .ImportIntoPostGIS import ImportIntoPostGIS
from .ImportIntoSpatialite import ImportIntoSpatialite
from .Intersection import Intersection
from .LinesToPolygons import LinesToPolygons
from .MinimumBoundingGeometry import MinimumBoundingGeometry
from .NearestNeighbourAnalysis import NearestNeighbourAnalysis
from .OffsetLine import OffsetLine
from .Orthogonalize import Orthogonalize
from .PointDistance import PointDistance
from .PointOnSurface import PointOnSurface
from .PointsAlongGeometry import PointsAlongGeometry
from .PointsDisplacement import PointsDisplacement
from .PointsFromLines import PointsFromLines
from .PointsFromPolygons import PointsFromPolygons
from .PointsInPolygon import PointsInPolygon
from .PointsLayerFromTable import PointsLayerFromTable
from .PointsToPaths import PointsToPaths
from .PoleOfInaccessibility import PoleOfInaccessibility
from .Polygonize import Polygonize
from .PolygonsToLines import PolygonsToLines
from .PostGISExecuteSQL import PostGISExecuteSQL
from .RandomExtract import RandomExtract
from .RandomExtractWithinSubsets import RandomExtractWithinSubsets
from .RandomPointsAlongLines import RandomPointsAlongLines
from .RandomPointsExtent import RandomPointsExtent
from .RandomPointsLayer import RandomPointsLayer
from .RandomPointsPolygons import RandomPointsPolygons
from .RandomSelection import RandomSelection
from .RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from .Rasterize import RasterizeAlgorithm
from .RasterCalculator import RasterCalculator
from .RasterLayerStatistics import RasterLayerStatistics
from .RectanglesOvalsDiamondsFixed import RectanglesOvalsDiamondsFixed
from .RectanglesOvalsDiamondsVariable import RectanglesOvalsDiamondsVariable
from .RegularPoints import RegularPoints
from .Relief import Relief
from .ReverseLineDirection import ReverseLineDirection
from .Ruggedness import Ruggedness
from .SelectByAttribute import SelectByAttribute
from .SelectByExpression import SelectByExpression
from .ServiceAreaFromLayer import ServiceAreaFromLayer
from .ServiceAreaFromPoint import ServiceAreaFromPoint
from .SetMValue import SetMValue
from .SetRasterStyle import SetRasterStyle
from .SetVectorStyle import SetVectorStyle
from .SetZValue import SetZValue
from .ShortestPathLayerToPoint import ShortestPathLayerToPoint
from .ShortestPathPointToLayer import ShortestPathPointToLayer
from .ShortestPathPointToPoint import ShortestPathPointToPoint
from .SingleSidedBuffer import SingleSidedBuffer
from .Slope import Slope
from .SnapGeometries import SnapGeometriesToLayer
from .SpatialiteExecuteSQL import SpatialiteExecuteSQL
from .SpatialIndex import SpatialIndex
from .SpatialJoin import SpatialJoin
from .SpatialJoinSummary import SpatialJoinSummary
from .StatisticsByCategories import StatisticsByCategories
from .SumLines import SumLines
from .SymmetricalDifference import SymmetricalDifference
from .TextToFloat import TextToFloat
from .TinInterpolation import TinInterpolation
from .TopoColors import TopoColor
from .TruncateTable import TruncateTable
from .Union import Union
from .UniqueValues import UniqueValues
from .VariableDistanceBuffer import VariableDistanceBuffer
from .VectorSplit import VectorSplit
from .VoronoiPolygons import VoronoiPolygons
from .ZonalStatistics import ZonalStatistics
# Absolute path of the plugin root: two directory levels above this file.
pluginPath = os.path.normpath(os.path.join(
    os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(QgsProcessingProvider):
    # Processing provider exposing the built-in "qgis:*" algorithms.
    def __init__(self):
        super().__init__()
        self.algs = []
        # Algorithms registered by third parties via this provider; they are
        # re-registered alongside the built-ins on every loadAlgorithms().
        self.externalAlgs = []
    def getAlgs(self):
        """Instantiate and return the full list of built-in algorithms."""
        algs = [AddTableField(),
                Aggregate(),
                Aspect(),
                BasicStatisticsForField(),
                CheckValidity(),
                ConcaveHull(),
                CreateAttributeIndex(),
                CreateConstantRaster(),
                Datasources2Vrt(),
                DefineProjection(),
                Delaunay(),
                DeleteColumn(),
                DeleteDuplicateGeometries(),
                DeleteHoles(),
                DensifyGeometries(),
                DensifyGeometriesInterval(),
                Difference(),
                EliminateSelection(),
                EquivalentNumField(),
                ExecuteSQL(),
                Explode(),
                ExportGeometryInfo(),
                ExtendLines(),
                ExtentFromLayer(),
                ExtractSpecificNodes(),
                FieldsCalculator(),
                FieldsMapper(),
                FieldsPyculator(),
                FindProjection(),
                FixedDistanceBuffer(),
                GeometryByExpression(),
                GeometryConvert(),
                GridLine(),
                GridPolygon(),
                Heatmap(),
                Hillshade(),
                HubDistanceLines(),
                HubDistancePoints(),
                HypsometricCurves(),
                IdwInterpolation(),
                ImportIntoPostGIS(),
                ImportIntoSpatialite(),
                Intersection(),
                LinesToPolygons(),
                MinimumBoundingGeometry(),
                NearestNeighbourAnalysis(),
                OffsetLine(),
                Orthogonalize(),
                PointDistance(),
                PointOnSurface(),
                PointsAlongGeometry(),
                PointsDisplacement(),
                PointsFromLines(),
                PointsFromPolygons(),
                PointsInPolygon(),
                PointsLayerFromTable(),
                PointsToPaths(),
                PoleOfInaccessibility(),
                Polygonize(),
                PolygonsToLines(),
                PostGISExecuteSQL(),
                RandomExtract(),
                RandomExtractWithinSubsets(),
                RandomPointsAlongLines(),
                RandomPointsExtent(),
                RandomPointsLayer(),
                RandomPointsPolygons(),
                RandomSelection(),
                RandomSelectionWithinSubsets(),
                RasterCalculator(),
                RasterizeAlgorithm(),
                RasterLayerStatistics(),
                RectanglesOvalsDiamondsFixed(),
                RectanglesOvalsDiamondsVariable(),
                RegularPoints(),
                Relief(),
                ReverseLineDirection(),
                Ruggedness(),
                SelectByAttribute(),
                SelectByExpression(),
                ServiceAreaFromLayer(),
                ServiceAreaFromPoint(),
                SetMValue(),
                SetRasterStyle(),
                SetVectorStyle(),
                SetZValue(),
                ShortestPathLayerToPoint(),
                ShortestPathPointToLayer(),
                ShortestPathPointToPoint(),
                SingleSidedBuffer(),
                Slope(),
                SnapGeometriesToLayer(),
                SpatialiteExecuteSQL(),
                SpatialIndex(),
                SpatialJoin(),
                SpatialJoinSummary(),
                StatisticsByCategories(),
                SumLines(),
                SymmetricalDifference(),
                TextToFloat(),
                TinInterpolation(),
                TopoColor(),
                TruncateTable(),
                Union(),
                UniqueValues(),
                VariableDistanceBuffer(),
                VectorSplit(),
                VoronoiPolygons(),
                ZonalStatistics()
                ]
        # Chart/plot algorithms are only offered when plotly is importable
        if hasPlotly:
            from .BarPlot import BarPlot
            from .BoxPlot import BoxPlot
            from .MeanAndStdDevPlot import MeanAndStdDevPlot
            from .PolarPlot import PolarPlot
            from .RasterLayerHistogram import RasterLayerHistogram
            from .VectorLayerHistogram import VectorLayerHistogram
            from .VectorLayerScatterplot import VectorLayerScatterplot
            from .VectorLayerScatterplot3D import VectorLayerScatterplot3D
            algs.extend([BarPlot(),
                         BoxPlot(),
                         MeanAndStdDevPlot(),
                         PolarPlot(),
                         RasterLayerHistogram(),
                         VectorLayerHistogram(),
                         VectorLayerScatterplot(),
                         VectorLayerScatterplot3D()])
        # to store algs added by 3rd party plugins as scripts
        folder = os.path.join(os.path.dirname(__file__), 'scripts')
        scripts = ScriptUtils.loadFromFolder(folder)
        for script in scripts:
            # bundled scripts are read-only in the script editor
            script.allowEdit = False
        algs.extend(scripts)
        return algs
    def id(self):
        """Provider id, used as the algorithm id prefix ("qgis:...")."""
        return 'qgis'
    def name(self):
        """Human-readable provider name shown in the Processing toolbox."""
        return 'QGIS'
    def icon(self):
        """Provider icon from the current QGIS theme."""
        return QgsApplication.getThemeIcon("/providerQgis.svg")
    def svgIconPath(self):
        """Path to the provider's SVG icon."""
        return QgsApplication.iconPath("providerQgis.svg")
    def loadAlgorithms(self):
        """(Re)register all built-in and externally contributed algorithms."""
        self.algs = self.getAlgs()
        for a in self.algs:
            self.addAlgorithm(a)
        for a in self.externalAlgs:
            self.addAlgorithm(a)
    def supportsNonFileBasedOutput(self):
        """This provider can write to non-file outputs (e.g. memory layers)."""
        return True
| gpl-2.0 |
ogenstad/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_group.py | 68 | 6570 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_group
short_description: Zabbix host groups creates/deletes
description:
- Create host groups if they do not exist.
- Delete existing host groups if they exist.
version_added: "1.8"
author:
    - "Cove (@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Create or delete host group.
required: false
default: "present"
choices: [ "present", "absent" ]
host_groups:
description:
- List of host groups to create or delete.
required: true
aliases: [ "host_group" ]
extends_documentation_fragment:
- zabbix
notes:
- Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Base create host groups example
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
when: inventory_hostname==groups['group_name'][0]
'''
# zabbix-api is an optional dependency: record its availability so main()
# can fail with a clear, actionable message instead of an ImportError.
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    from zabbix_api import Already_Exists
    HAS_ZABBIX_API = True
except ImportError:
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class HostGroup(object):
    """Thin wrapper around the Zabbix API for host-group management.

    :param module: the AnsibleModule instance (used for check mode and
        error reporting via ``exit_json``/``fail_json``).
    :param zbx: an authenticated ZabbixAPI connection.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # create host group(s) if not exists
    def create_host_group(self, group_names):
        """Create every host group in *group_names* that does not already
        exist and return the list of names actually created.

        In check mode the module exits with ``changed=True`` as soon as a
        missing group is found, without touching Zabbix.
        """
        try:
            group_add_list = []
            for group_name in group_names:
                result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
                if not result:
                    try:
                        if self._module.check_mode:
                            self._module.exit_json(changed=True)
                        self._zapi.hostgroup.create({'name': group_name})
                        group_add_list.append(group_name)
                    except Already_Exists:
                        # Another actor created this group between our 'get'
                        # and 'create'. Treat it as already existing and keep
                        # processing the remaining groups instead of returning
                        # early and silently skipping them (previous behavior).
                        continue
            return group_add_list
        except Exception as e:
            self._module.fail_json(msg="Failed to create host group(s): %s" % e)

    # delete host group(s)
    def delete_host_group(self, group_ids):
        """Delete the host groups identified by *group_ids* (no-op delete in
        check mode: exits with ``changed=True`` immediately)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.hostgroup.delete(group_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)

    # get group ids by name
    def get_group_ids(self, host_groups):
        """Resolve *host_groups* names to Zabbix group ids.

        :return: tuple ``(group_ids, group_list)`` where ``group_list`` is
            the raw API response for the matched groups.
        """
        group_list = self._zapi.hostgroup.get(
            {'output': 'extend', 'filter': {'name': host_groups}})
        group_ids = [group['groupid'] for group in group_list]
        return group_ids, group_list
def main():
    """Ansible entry point: create or delete the requested Zabbix host groups."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            host_groups=dict(type='list', required=True, aliases=['host_group']),
            state=dict(default="present", choices=['present', 'absent']),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    params = module.params
    host_groups = params['host_groups']
    state = params['state']

    # Connect and authenticate against the Zabbix API.
    zbx = None
    try:
        zbx = ZabbixAPI(params['server_url'], timeout=params['timeout'],
                        user=params['http_login_user'],
                        passwd=params['http_login_password'],
                        validate_certs=params['validate_certs'])
        zbx.login(params['login_user'], params['login_password'])
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    group_api = HostGroup(module, zbx)

    # Resolve the requested names to existing group ids (if any).
    group_ids, group_list = [], []
    if host_groups:
        group_ids, group_list = group_api.get_group_ids(host_groups)

    if state == "absent":
        # Delete whatever currently exists; report no change otherwise.
        if not group_ids:
            module.exit_json(changed=False, result="No host group(s) to delete.")
        group_api.delete_host_group(group_ids)
        deleted_names = [group['name'] for group in group_list]
        module.exit_json(changed=True,
                         result="Successfully deleted host group(s): %s." % ",".join(deleted_names))
    else:
        # Create any groups that are missing.
        created = group_api.create_host_group(host_groups)
        if created:
            module.exit_json(changed=True, result="Successfully created host group(s): %s" % created)
        module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
sloria/modular-odm | tests/queries/test_foreign_queries.py | 4 | 2512 | # -*- coding: utf-8 -*-
from nose.tools import * # PEP8 asserts
from modularodm import fields, StoredObject
from modularodm.query.query import RawQuery as Q
from tests.base import ModularOdmTestCase
class TestForeignQueries(ModularOdmTestCase):
    """Equality ('eq') queries against foreign and abstract-foreign fields,
    both scalar and list-valued."""

    def define_objects(self):
        """Declare the Foo/Bar schema used by every test in this case."""
        class Foo(StoredObject):
            _id = fields.StringField(primary=True)
            _meta = {
                'optimistic': True,
            }

        class Bar(StoredObject):
            _id = fields.StringField(primary=True)
            ref = fields.ForeignField('foo', backref='my_ref')
            abs_ref = fields.AbstractForeignField(backref='my_abs_ref')
            ref_list = fields.ForeignField('foo', backref='my_ref_list', list=True)
            abs_ref_list = fields.AbstractForeignField(backref='my_abs_ref_list', list=True)
            _meta = {
                'optimistic': True,
            }

        return Foo, Bar

    def set_up_objects(self):
        """Persist five Foo records for the queries to reference."""
        self.foos = []
        for _ in range(5):
            record = self.Foo()
            record.save()
            self.foos.append(record)

    def _count_matches(self, field, foo):
        """Return how many Bar documents satisfy ``field == foo``."""
        return len(self.Bar.find(Q(field, 'eq', foo)))

    def test_eq_foreign(self):
        self.Bar(ref=self.foos[0]).save()
        assert_equal(self._count_matches('ref', self.foos[0]), 1)
        assert_equal(self._count_matches('ref', self.foos[-1]), 0)

    def test_eq_foreign_list(self):
        self.Bar(ref_list=self.foos[:3]).save()
        assert_equal(self._count_matches('ref_list', self.foos[0]), 1)
        assert_equal(self._count_matches('ref_list', self.foos[-1]), 0)

    def test_eq_abstract(self):
        self.Bar(abs_ref=self.foos[0]).save()
        assert_equal(self._count_matches('abs_ref', self.foos[0]), 1)
        assert_equal(self._count_matches('abs_ref', self.foos[-1]), 0)

    def test_eq_abstract_list(self):
        self.Bar(abs_ref_list=self.foos[:3]).save()
        assert_equal(self._count_matches('abs_ref_list', self.foos[0]), 1)
        assert_equal(self._count_matches('abs_ref_list', self.foos[-1]), 0)
| apache-2.0 |
zurawiki/advert-crm | mysite/views.py | 1 | 2242 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import render
from contracts.models import Advertiser
__author__ = 'roger'
class AdvertiserForm(ModelForm):
    """ModelForm for editing an Advertiser profile.

    'approved' and 'email' are excluded from the form: approval is not
    user-editable, and the view fills in a default email from the
    logged-in user when none is set.
    """
    class Meta:
        model = Advertiser
        exclude = ['approved', 'email']
        help_texts = {
            'salesperson':
            '(optional) If you were contacted by someone at The Harvard Lampoon, you can select their name here.',
        }
@login_required
def register_advertiser(request):
    """Create or update the Advertiser profile attached to the current user.

    POST: validate the submitted AdvertiserForm, attach the saved
    Advertiser to the user's profile, backfill the user's name from the
    advertiser contact, then redirect to ``?next=`` or '/'.
    GET: render the form bound to the user's existing advertiser (if any).
    """
    if request.method == 'POST':
        form = AdvertiserForm(request.POST)  # A form bound to the POST data
        if form.is_valid():  # All validation rules pass
            advertiser = form.save(commit=False)
            # Default the advertiser email to the account email when blank.
            if not advertiser.email:
                advertiser.email = request.user.email
            advertiser.save()
            form.save_m2m()
            profile = request.user.get_profile()
            profile.advertiser = advertiser
            profile.save()
            # if no user info, update it here
            if not request.user.first_name:
                # Split "First Last" into the two name fields; anything other
                # than exactly two words goes entirely into first_name.
                if len(advertiser.contact.split()) == 2:
                    request.user.first_name = advertiser.contact.split()[0]
                    request.user.last_name = advertiser.contact.split()[1]
                else:
                    request.user.first_name = advertiser.contact
                request.user.save()
            messages.success(request, 'Advertiser profile updated.')
            # NOTE(review): 'next' is redirected to without validation --
            # possible open redirect; consider Django's
            # url_has_allowed_host_and_scheme before honoring it.
            if 'next' in request.GET:
                return HttpResponseRedirect(request.GET['next'])
            else:
                return HttpResponseRedirect('/')
    else:
        profile = request.user.get_profile()
    # NOTE(review): on an invalid POST, execution reaches this point with
    # 'profile' unassigned (it is only set in the GET branch) and the bound
    # form's errors are discarded -- verify this path against callers.
    data = {}
    data['form'] = AdvertiserForm(instance=profile.advertiser, initial={'email': request.user.email})
    if profile.advertiser is None:
        messages.warning(request, 'Before ordering an ad contract, you must first fill in your contact profile.')
    else:
        data['approved'] = profile.advertiser.approved
    return render(request, 'advertiser/form.html', data)
| mit |
brownman/selenium-webdriver | selenium/src/py/lib/docutils/transforms/__init__.py | 5 | 6866 | # Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 3892 $
# Date: $Date: 2005-09-20 22:04:53 +0200 (Tue, 20 Sep 2005) $
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for standard tree transforms available
to Docutils components. Tree transforms serve a variety of purposes:
- To tie up certain syntax-specific "loose ends" that remain after the
initial parsing of the input plaintext. These transforms are used to
supplement a limited syntax.
- To automate the internal linking of the document tree (hyperlink
references, footnote references, etc.).
- To extract useful information from the document tree. These
transforms may be used to construct (for example) indexes and tables
of contents.
Each transform is an optional step that a Docutils Reader may choose to
perform on the parsed document, depending on the input context. A Docutils
Reader may also perform Reader-specific transforms before or after performing
these standard transforms.
"""
__docformat__ = 'reStructuredText'
from docutils import languages, ApplicationError, TransformSpec
class TransformError(ApplicationError): pass
class Transform:

    """
    Abstract base class for Docutils transform components.

    Subclasses override `apply` and usually set `default_priority`.
    """

    # Numerical priority of this transform, 0 through 999 (override in
    # subclasses).
    default_priority = None

    def __init__(self, document, startnode=None):
        """
        Prepare an in-place document transform.

        `document` is the document tree to operate on.  `startnode`, when
        given, is the node at which the transform begins; transforms that
        apply to the whole document leave it as `None`.
        """
        # The document tree to transform.
        self.document = document
        # Node from which to begin the transform (or None).
        self.startnode = startnode
        # Language module local to this document.
        self.language = languages.get_language(
            document.settings.language_code)

    def apply(self, **kwargs):
        """Override to apply the transform to the document tree."""
        raise NotImplementedError('subclass must override this method')
class Transformer(TransformSpec):

    """
    Stores transforms (`Transform` classes) and applies them to document
    trees. Also keeps track of components by component type name.
    """

    def __init__(self, document):
        self.transforms = []
        """List of transforms to apply. Each item is a 3-tuple:
        ``(priority string, transform class, pending node or None)``."""

        self.unknown_reference_resolvers = []
        """List of hook functions which assist in resolving references"""

        self.document = document
        """The `nodes.document` object this Transformer is attached to."""

        self.applied = []
        """Transforms already applied, in order."""

        self.sorted = 0
        """Boolean: is `self.tranforms` sorted?"""

        self.components = {}
        """Mapping of component type name to component object. Set by
        `self.populate_from_components()`."""

        self.serialno = 0
        """Internal serial number to keep track of the add order of
        transforms."""

    def add_transform(self, transform_class, priority=None, **kwargs):
        """
        Store a single transform.  Use `priority` to override the default.
        `kwargs` is a dictionary whose contents are passed as keyword
        arguments to the `apply` method of the transform.  This can be used to
        pass application-specific data to the transform instance.
        """
        if priority is None:
            priority = transform_class.default_priority
        priority_string = self.get_priority_string(priority)
        self.transforms.append(
            (priority_string, transform_class, None, kwargs))
        # Appending invalidates the sort order; re-sorted lazily in
        # apply_transforms().
        self.sorted = 0

    def add_transforms(self, transform_list):
        """Store multiple transforms, with default priorities."""
        for transform_class in transform_list:
            priority_string = self.get_priority_string(
                transform_class.default_priority)
            self.transforms.append(
                (priority_string, transform_class, None, {}))
        self.sorted = 0

    def add_pending(self, pending, priority=None):
        """Store a transform with an associated `pending` node."""
        transform_class = pending.transform
        if priority is None:
            priority = transform_class.default_priority
        priority_string = self.get_priority_string(priority)
        self.transforms.append(
            (priority_string, transform_class, pending, {}))
        self.sorted = 0

    def get_priority_string(self, priority):
        """
        Return a string, `priority` combined with `self.serialno`.

        This ensures FIFO order on transforms with identical priority.
        """
        # "PPP-SSS": zero-padded so lexicographic order equals numeric order.
        self.serialno += 1
        return '%03d-%03d' % (priority, self.serialno)

    def populate_from_components(self, components):
        """
        Store each component's default transforms, with default priorities.
        Also, store components by type name in a mapping for later lookup.
        """
        for component in components:
            if component is None:
                continue
            self.add_transforms(component.get_transforms())
            self.components[component.component_type] = component
        self.sorted = 0
        # Set up all of the reference resolvers for this transformer. Each
        # component of this transformer is able to register its own helper
        # functions to help resolve references.
        unknown_reference_resolvers = []
        for i in components:
            unknown_reference_resolvers.extend(i.unknown_reference_resolvers)
        # Decorate-sort-undecorate on resolver priority.
        decorated_list = [(f.priority, f) for f in unknown_reference_resolvers]
        decorated_list.sort()
        self.unknown_reference_resolvers.extend([f[1] for f in decorated_list])

    def apply_transforms(self):
        """Apply all of the stored transforms, in priority order."""
        self.document.reporter.attach_observer(
            self.document.note_transform_message)
        while self.transforms:
            if not self.sorted:
                # Unsorted initially, and whenever a transform is added.
                # Sort ascending then reverse so pop() removes the smallest
                # priority string (highest-priority transform) in O(1).
                self.transforms.sort()
                self.transforms.reverse()
                self.sorted = 1
            priority, transform_class, pending, kwargs = self.transforms.pop()
            transform = transform_class(self.document, startnode=pending)
            transform.apply(**kwargs)
            self.applied.append((priority, transform_class, pending, kwargs))
| apache-2.0 |
SaintAttila/attila | attila/notifications/files.py | 1 | 4027 | """
Bindings for sending notifications to file objects.
"""
from distutils.util import strtobool
from ..abc.configurations import Configurable
from ..abc.files import Path
from ..abc.notifications import Notifier
from ..configurations import ConfigManager
from ..exceptions import OperationNotSupportedError, verify_type
from ..plugins import config_loader
__author__ = 'Aaron Hosford'
__all__ = [
'FileNotifier',
]
# TODO: Should this inherit from connection?
@config_loader
class FileNotifier(Notifier, Configurable):
    """
    A file notifier passes incoming notifications to an arbitrary file.
    """

    @classmethod
    def load_config_value(cls, manager, value, *args, **kwargs):
        """
        Load a class instance from the value of a config option.

        :param manager: A ConfigManager instance.
        :param value: The string value of the option (interpreted as a path).
        :return: A new instance of this class.
        """
        verify_type(manager, ConfigManager)
        assert isinstance(manager, ConfigManager)
        verify_type(value, str, non_empty=True)
        path = Path.load_config_value(manager, value)
        assert isinstance(path, Path)
        return cls(*args, path=path, **kwargs)

    @classmethod
    def load_config_section(cls, manager, section, *args, **kwargs):
        """
        Load a class instance from a config section.

        :param manager: A ConfigManager instance.
        :param section: The name of the section.
        :return: A new instance of this class.
        """
        verify_type(manager, ConfigManager)
        assert isinstance(manager, ConfigManager)
        verify_type(section, str, non_empty=True)
        path = manager.load_option(section, 'Path', Path, None)
        if path is None:
            path = manager.load_section(section, Path)
        assert isinstance(path, Path)
        append = manager.load_option(section, 'Append', strtobool, True)
        format_string = manager.load_option(section, 'Format', str, None)
        if format_string is None:
            # The message format may alternatively be stored in a separate
            # file named by the 'Format Path' option.
            format_path = manager.load_option(section, 'Format Path', Path, None)
            if format_path is not None:
                assert isinstance(format_path, Path)
                with format_path.open():
                    format_string = format_path.read()
        encoding = manager.load_option(section, 'Encoding', str, None)
        return cls(
            *args,
            path=path,
            append=append,
            format_string=format_string,
            encoding=encoding,
            **kwargs
        )

    def __init__(self, path=None, append=True, format_string=None, encoding=None):
        """
        :param path: A Path, a path string, or an already-open writable
            file-like object.
        :param append: Whether to append to (True) or overwrite (False) an
            existing file when *path* names one.
        :param format_string: Optional template interpolated with the
            notification arguments; when None, arguments are printed as-is.
        :param encoding: Text encoding used when opening *path*.
        """
        verify_type(append, bool)
        if format_string is not None:
            verify_type(format_string, str)
        if encoding is not None:
            verify_type(encoding, str)
        # Normalize a plain string to a Path, then open any Path once;
        # anything else is assumed to already be an open, writable file-like
        # object. (Previously the open() call was duplicated across the
        # str and Path branches.)
        if isinstance(path, str):
            path = Path(path)
        if isinstance(path, Path):
            file_obj = path.open(mode=('a' if append else 'w'), encoding=encoding)
        else:
            file_obj = path
        assert hasattr(file_obj, 'write')
        super().__init__()
        self._format_string = format_string
        self._file_obj = file_obj

    def __call__(self, *args, attachments=None, **kwargs):
        """
        Send a notification on this notifier's channel.

        :param attachments: The file attachments, if any, to include in the notification.
        :return: None
        """
        if attachments is not None:
            raise OperationNotSupportedError("File attachments are unsupported.")
        if self._format_string is None:
            print(*args, file=self._file_obj, **kwargs)
        else:
            data = self.interpolate(self._format_string, args, kwargs)
            self._file_obj.write(data)

    def close(self):
        """Close the file notifier, and its associated file."""
        self._file_obj.close()
| mit |
shl3807/shadowsocks | shadowsocks/tcprelay.py | 27 | 25672 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import socket
import errno
import struct
import logging
import traceback
import random
import encrypt
import eventloop
import utils
from common import parse_header
# The timeout queue is compacted once the cleared prefix exceeds this size
# (see TCPRelay._sweep_timeout).
TIMEOUTS_CLEAN_SIZE = 512
# Activity timestamps are refreshed at most once per this many seconds.
TIMEOUT_PRECISION = 4
# Linux sendto() flag enabling TCP Fast Open on the connecting send.
MSG_FASTOPEN = 0x20000000
# SOCKS5 command codes (RFC 1928, section 4).
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# local:
# stage 0 init
# stage 1 hello received, hello sent
# stage 2 UDP assoc
# stage 3 DNS
# stage 4 addr received, reply sent
# stage 5 remote connected

# remote:
# stage 0 init
# stage 3 DNS
# stage 4 addr received, reply sent
# stage 5 remote connected
STAGE_INIT = 0
STAGE_HELLO = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_REPLY = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# stream direction
STREAM_UP = 0
STREAM_DOWN = 1
# stream wait status
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
# Bytes requested per recv() call.
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
    """Relays one TCP connection between a local socket and a remote socket
    on a non-blocking event loop, encrypting/decrypting with the configured
    cipher.  Progress is tracked with the STAGE_* constants above; buffer
    readiness with the STREAM_*/WAIT_STATUS_* constants.
    """
    def __init__(self, server, fd_to_handlers, loop, local_sock, config,
                 dns_resolver, is_local):
        # is_local is True for the client-side (sslocal) role, False for
        # the server-side (ssserver) role.
        self._server = server
        self._fd_to_handlers = fd_to_handlers
        self._loop = loop
        self._local_sock = local_sock
        self._remote_sock = None
        self._config = config
        self._dns_resolver = dns_resolver
        self._is_local = is_local
        self._stage = STAGE_INIT
        self._encryptor = encrypt.Encryptor(config['password'],
                                            config['method'])
        self._fastopen_connected = False
        # Pending data for each direction, flushed when the socket is writable.
        self._data_to_write_to_local = []
        self._data_to_write_to_remote = []
        self._upstream_status = WAIT_STATUS_READING
        self._downstream_status = WAIT_STATUS_INIT
        self._remote_address = None
        if is_local:
            self._chosen_server = self._get_a_server()
        fd_to_handlers[local_sock.fileno()] = self
        local_sock.setblocking(False)
        local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR)
        self.last_activity = 0
        self._update_activity()
    def __hash__(self):
        # default __hash__ is id / 16
        # we want to eliminate collisions
        return id(self)
    @property
    def remote_address(self):
        # (host, port) tuple of the destination, set once the request
        # header has been parsed; None before that.
        return self._remote_address
    def _get_a_server(self):
        """Pick the (server, port) pair to connect to; a port list in the
        config is sampled at random."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        logging.debug('chosen server: %s:%d', server, server_port)
        # TODO support multiple server IP
        return server, server_port
    def _update_activity(self):
        """Refresh this handler's position in the server's timeout queue."""
        self._server.update_activity(self)
    def _update_stream(self, stream, status):
        """Record the wait status for one direction and, if it changed,
        recompute the poll event masks for both sockets."""
        dirty = False
        if stream == STREAM_DOWN:
            if self._downstream_status != status:
                self._downstream_status = status
                dirty = True
        elif stream == STREAM_UP:
            if self._upstream_status != status:
                self._upstream_status = status
                dirty = True
        if dirty:
            if self._local_sock:
                event = eventloop.POLL_ERR
                if self._downstream_status & WAIT_STATUS_WRITING:
                    event |= eventloop.POLL_OUT
                if self._upstream_status & WAIT_STATUS_READING:
                    event |= eventloop.POLL_IN
                self._loop.modify(self._local_sock, event)
            if self._remote_sock:
                event = eventloop.POLL_ERR
                if self._downstream_status & WAIT_STATUS_READING:
                    event |= eventloop.POLL_IN
                if self._upstream_status & WAIT_STATUS_WRITING:
                    event |= eventloop.POLL_OUT
                self._loop.modify(self._remote_sock, event)
    def _write_to_sock(self, data, sock):
        """Attempt a non-blocking send; buffer any unsent remainder and
        switch the matching stream into WRITING so the loop polls for
        writability.  Returns False if the handler was destroyed."""
        if not data or not sock:
            return False
        uncomplete = False
        try:
            l = len(data)
            s = sock.send(data)
            if s < l:
                # Short write: keep the remainder for the next POLL_OUT.
                data = data[s:]
                uncomplete = True
        except (OSError, IOError) as e:
            error_no = eventloop.errno_from_exception(e)
            if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                            errno.EWOULDBLOCK):
                uncomplete = True
            else:
                logging.error(e)
                if self._config['verbose']:
                    traceback.print_exc()
                self.destroy()
                return False
        if uncomplete:
            if sock == self._local_sock:
                self._data_to_write_to_local.append(data)
                self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
            elif sock == self._remote_sock:
                self._data_to_write_to_remote.append(data)
                self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        else:
            if sock == self._local_sock:
                self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
            elif sock == self._remote_sock:
                self._update_stream(STREAM_UP, WAIT_STATUS_READING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        return True
    def _handle_stage_reply(self, data):
        """Queue data for the remote; on the local side with fast_open
        enabled, perform the deferred connect via sendto(MSG_FASTOPEN)."""
        if self._is_local:
            data = self._encryptor.encrypt(data)
        self._data_to_write_to_remote.append(data)
        if self._is_local and not self._fastopen_connected and \
                self._config['fast_open']:
            try:
                self._fastopen_connected = True
                remote_sock = \
                    self._create_remote_socket(self._chosen_server[0],
                                               self._chosen_server[1])
                self._loop.add(remote_sock, eventloop.POLL_ERR)
                # NOTE(review): this joins/re-buffers _data_to_write_to_local
                # although the payload was appended to _data_to_write_to_remote
                # above -- looks inconsistent; confirm against upstream.
                data = ''.join(self._data_to_write_to_local)
                l = len(data)
                s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
                if s < l:
                    data = data[s:]
                    self._data_to_write_to_local = [data]
                    self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                else:
                    self._data_to_write_to_local = []
                    self._update_stream(STREAM_UP, WAIT_STATUS_READING)
                    self._stage = STAGE_STREAM
            except (OSError, IOError) as e:
                if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
                    self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
                    logging.error('fast open not supported on this OS')
                    self._config['fast_open'] = False
                    self.destroy()
                else:
                    logging.error(e)
                    if self._config['verbose']:
                        traceback.print_exc()
                    self.destroy()
    def _handle_stage_hello(self, data):
        """Parse the SOCKS5 request (local) or the shadowsocks header
        (server), reply to the client, and kick off DNS resolution of the
        next hop."""
        try:
            if self._is_local:
                cmd = ord(data[1])
                if cmd == CMD_UDP_ASSOCIATE:
                    logging.debug('UDP associate')
                    # Reply with the address family/port the client should
                    # send UDP to (RFC 1928 reply format).
                    if self._local_sock.family == socket.AF_INET6:
                        header = '\x05\x00\x00\x04'
                    else:
                        header = '\x05\x00\x00\x01'
                    addr, port = self._local_sock.getsockname()
                    addr_to_send = socket.inet_pton(self._local_sock.family,
                                                    addr)
                    port_to_send = struct.pack('>H', port)
                    self._write_to_sock(header + addr_to_send + port_to_send,
                                        self._local_sock)
                    self._stage = STAGE_UDP_ASSOC
                    # just wait for the client to disconnect
                    return
                elif cmd == CMD_CONNECT:
                    # just trim VER CMD RSV
                    data = data[3:]
                else:
                    logging.error('unknown command %d', cmd)
                    self.destroy()
                    return
            header_result = parse_header(data)
            if header_result is None:
                raise Exception('can not parse header')
            addrtype, remote_addr, remote_port, header_length = header_result
            logging.info('connecting %s:%d' % (remote_addr, remote_port))
            self._remote_address = (remote_addr, remote_port)
            # pause reading
            self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
            self._stage = STAGE_DNS
            if self._is_local:
                # forward address to remote
                self._write_to_sock('\x05\x00\x00\x01\x00\x00\x00\x00\x10\x10',
                                    self._local_sock)
                data_to_send = self._encryptor.encrypt(data)
                self._data_to_write_to_remote.append(data_to_send)
                # notice here may go into _handle_dns_resolved directly
                self._dns_resolver.resolve(self._chosen_server[0],
                                           self._handle_dns_resolved)
            else:
                if len(data) > header_length:
                    self._data_to_write_to_remote.append(data[header_length:])
                # notice here may go into _handle_dns_resolved directly
                self._dns_resolver.resolve(remote_addr,
                                           self._handle_dns_resolved)
        except Exception as e:
            logging.error(e)
            if self._config['verbose']:
                traceback.print_exc()
            # TODO use logging when debug completed
            self.destroy()
    def _create_remote_socket(self, ip, port):
        """Create a non-blocking TCP socket for the remote side and register
        it in the fd-to-handler map (not yet connected)."""
        addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
                                   socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
        af, socktype, proto, canonname, sa = addrs[0]
        remote_sock = socket.socket(af, socktype, proto)
        self._remote_sock = remote_sock
        self._fd_to_handlers[remote_sock.fileno()] = self
        remote_sock.setblocking(False)
        remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        return remote_sock
    def _handle_dns_resolved(self, result, error):
        """DNS callback: connect to the resolved address (or defer the
        connect when fast_open is active) and advance the stage."""
        if error:
            logging.error(error)
            self.destroy()
            return
        if result:
            ip = result[1]
            if ip:
                try:
                    self._stage = STAGE_REPLY
                    remote_addr = ip
                    if self._is_local:
                        remote_port = self._chosen_server[1]
                    else:
                        remote_port = self._remote_address[1]
                    if self._is_local and self._config['fast_open']:
                        # wait for more data to arrive and send them in one SYN
                        self._stage = STAGE_REPLY
                        self._update_stream(STREAM_UP, WAIT_STATUS_READING)
                        # TODO when there is already data in this packet
                    else:
                        remote_sock = self._create_remote_socket(remote_addr,
                                                                 remote_port)
                        try:
                            remote_sock.connect((remote_addr, remote_port))
                        except (OSError, IOError) as e:
                            if eventloop.errno_from_exception(e) == \
                                    errno.EINPROGRESS:
                                pass
                        self._loop.add(remote_sock,
                                       eventloop.POLL_ERR | eventloop.POLL_OUT)
                        self._stage = STAGE_REPLY
                        self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                        self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
                    return
                except (OSError, IOError) as e:
                    logging.error(e)
                    if self._config['verbose']:
                        traceback.print_exc()
        self.destroy()
    def _on_local_read(self):
        """Handle readable local socket: decrypt (server side), then either
        relay (stream stage) or advance the handshake state machine."""
        self._update_activity()
        if not self._local_sock:
            return
        is_local = self._is_local
        data = None
        try:
            data = self._local_sock.recv(BUF_SIZE)
        except (OSError, IOError) as e:
            if eventloop.errno_from_exception(e) in \
                    (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
                return
        if not data:
            # Empty recv means the peer closed the connection.
            self.destroy()
            return
        if not is_local:
            data = self._encryptor.decrypt(data)
            if not data:
                return
        if self._stage == STAGE_STREAM:
            if self._is_local:
                data = self._encryptor.encrypt(data)
            self._write_to_sock(data, self._remote_sock)
            return
        elif is_local and self._stage == STAGE_INIT:
            # TODO check auth method
            self._write_to_sock('\x05\00', self._local_sock)
            self._stage = STAGE_HELLO
            return
        elif self._stage == STAGE_REPLY:
            self._handle_stage_reply(data)
        elif (is_local and self._stage == STAGE_HELLO) or \
                (not is_local and self._stage == STAGE_INIT):
            self._handle_stage_hello(data)
    def _on_remote_read(self):
        """Handle readable remote socket: transform the data for the other
        side and forward it to the local socket."""
        self._update_activity()
        data = None
        try:
            data = self._remote_sock.recv(BUF_SIZE)
        except (OSError, IOError) as e:
            if eventloop.errno_from_exception(e) in \
                    (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
                return
        if not data:
            self.destroy()
            return
        if self._is_local:
            data = self._encryptor.decrypt(data)
        else:
            data = self._encryptor.encrypt(data)
        try:
            self._write_to_sock(data, self._local_sock)
        except Exception as e:
            logging.error(e)
            if self._config['verbose']:
                traceback.print_exc()
            # TODO use logging when debug completed
            self.destroy()
    def _on_local_write(self):
        """Flush buffered downstream data now that the local socket is
        writable; go back to reading when the buffer drains."""
        if self._data_to_write_to_local:
            data = ''.join(self._data_to_write_to_local)
            self._data_to_write_to_local = []
            self._write_to_sock(data, self._local_sock)
        else:
            self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
    def _on_remote_write(self):
        """Flush buffered upstream data; a writable remote socket also means
        the connect completed, so enter the stream stage."""
        self._stage = STAGE_STREAM
        if self._data_to_write_to_remote:
            data = ''.join(self._data_to_write_to_remote)
            self._data_to_write_to_remote = []
            self._write_to_sock(data, self._remote_sock)
        else:
            self._update_stream(STREAM_UP, WAIT_STATUS_READING)
    def _on_local_error(self):
        """Log the local socket's pending error and tear down the pair."""
        logging.debug('got local error')
        if self._local_sock:
            logging.error(eventloop.get_sock_error(self._local_sock))
        self.destroy()
    def _on_remote_error(self):
        """Log the remote socket's pending error and tear down the pair."""
        logging.debug('got remote error')
        if self._remote_sock:
            logging.error(eventloop.get_sock_error(self._remote_sock))
        self.destroy()
    def handle_event(self, sock, event):
        """Dispatch a poll event for either socket; destruction mid-dispatch
        is re-checked after each step."""
        if self._stage == STAGE_DESTROYED:
            logging.debug('ignore handle_event: destroyed')
            return
        # order is important
        if sock == self._remote_sock:
            if event & eventloop.POLL_ERR:
                self._on_remote_error()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_remote_read()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & eventloop.POLL_OUT:
                self._on_remote_write()
        elif sock == self._local_sock:
            if event & eventloop.POLL_ERR:
                self._on_local_error()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_local_read()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & eventloop.POLL_OUT:
                self._on_local_write()
        else:
            logging.warn('unknown socket')
    def destroy(self):
        """Idempotently close both sockets, deregister from the loop, the
        fd map, the DNS resolver, and the owning server."""
        if self._stage == STAGE_DESTROYED:
            logging.debug('already destroyed')
            return
        self._stage = STAGE_DESTROYED
        if self._remote_address:
            logging.debug('destroy: %s:%d' %
                          self._remote_address)
        else:
            logging.debug('destroy')
        if self._remote_sock:
            logging.debug('destroying remote')
            self._loop.remove(self._remote_sock)
            del self._fd_to_handlers[self._remote_sock.fileno()]
            self._remote_sock.close()
            self._remote_sock = None
        if self._local_sock:
            logging.debug('destroying local')
            self._loop.remove(self._local_sock)
            del self._fd_to_handlers[self._local_sock.fileno()]
            self._local_sock.close()
            self._local_sock = None
        self._dns_resolver.remove_callback(self._handle_dns_resolved)
        self._server.remove_handler(self)
class TCPRelay(object):
    """Listens on one TCP port and spawns a TCPRelayHandler per connection.

    Also owns a coarse-grained timeout queue: handlers report activity via
    update_activity() and idle ones are destroyed by _sweep_timeout().
    """
    def __init__(self, config, dns_resolver, is_local):
        self._config = config
        self._is_local = is_local
        self._dns_resolver = dns_resolver
        self._closed = False
        self._eventloop = None
        self._fd_to_handlers = {}
        self._last_time = time.time()
        self._timeout = config['timeout']
        self._timeouts = []  # a list for all the handlers
        # we trim the timeouts once a while
        self._timeout_offset = 0  # last checked position for timeout
        self._handler_to_timeouts = {}  # key: handler value: index in timeouts
        # local mode listens on the client-facing address, server mode on the
        # public server address
        if is_local:
            listen_addr = config['local_address']
            listen_port = config['local_port']
        else:
            listen_addr = config['server']
            listen_port = config['server_port']
        self._listen_port = listen_port
        addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
                                   socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (listen_addr, listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(sa)
        server_socket.setblocking(False)
        if config['fast_open']:
            try:
                # 23 == TCP_FASTOPEN on Linux (constant not exposed by older
                # Python versions); 5 is the fastopen queue length
                server_socket.setsockopt(socket.SOL_TCP, 23, 5)
            except socket.error:
                logging.error('warning: fast open is not available')
                self._config['fast_open'] = False
        server_socket.listen(1024)
        self._server_socket = server_socket
    def add_to_loop(self, loop):
        """Attach the listen socket and event callback to `loop` (once only)."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop
        loop.add_handler(self._handle_events)
        self._eventloop.add(self._server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR)
    def remove_handler(self, handler):
        """Drop `handler` from the timeout queue (O(1) via tombstoning)."""
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            self._timeouts[index] = None
            del self._handler_to_timeouts[hash(handler)]
    def update_activity(self, handler):
        """ set handler to active """
        now = int(time.time())
        if now - handler.last_activity < TIMEOUT_PRECISION:
            # thus we can lower timeout modification frequency
            return
        handler.last_activity = now
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            self._timeouts[index] = None
        # re-append at the tail: the queue stays sorted by last_activity
        length = len(self._timeouts)
        self._timeouts.append(handler)
        self._handler_to_timeouts[hash(handler)] = length
    def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need
        # we just need a sorted last_activity queue and it's faster than heapq
        # in fact we can do O(1) insertion/remove so we invent our own
        if self._timeouts:
            logging.log(utils.VERBOSE_LEVEL, 'sweeping timeouts')
            now = time.time()
            length = len(self._timeouts)
            pos = self._timeout_offset
            while pos < length:
                handler = self._timeouts[pos]
                if handler:
                    if now - handler.last_activity < self._timeout:
                        # queue is sorted by last_activity, so the rest are
                        # newer and cannot have timed out yet
                        break
                    else:
                        if handler.remote_address:
                            logging.warn('timed out: %s:%d' %
                                         handler.remote_address)
                        else:
                            logging.warn('timed out')
                        handler.destroy()
                        self._timeouts[pos] = None  # free memory
                        pos += 1
                else:
                    pos += 1
            if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # clean up the timeout queue when it gets larger than half
                # of the queue
                self._timeouts = self._timeouts[pos:]
                for key in self._handler_to_timeouts:
                    self._handler_to_timeouts[key] -= pos
                pos = 0
            self._timeout_offset = pos
    def _handle_events(self, events):
        """Event-loop callback: accept new connections, dispatch the rest to
        their per-fd handlers, then run periodic timeout/close housekeeping."""
        for sock, fd, event in events:
            if sock:
                logging.log(utils.VERBOSE_LEVEL, 'fd %d %s', fd,
                            eventloop.EVENT_NAMES.get(event, event))
            if sock == self._server_socket:
                if event & eventloop.POLL_ERR:
                    # TODO
                    raise Exception('server_socket error')
                try:
                    logging.debug('accept')
                    conn = self._server_socket.accept()
                    # the handler registers itself in self._fd_to_handlers
                    TCPRelayHandler(self, self._fd_to_handlers,
                                    self._eventloop, conn[0], self._config,
                                    self._dns_resolver, self._is_local)
                except (OSError, IOError) as e:
                    error_no = eventloop.errno_from_exception(e)
                    if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                    errno.EWOULDBLOCK):
                        continue
                    else:
                        logging.error(e)
                        if self._config['verbose']:
                            traceback.print_exc()
            else:
                if sock:
                    handler = self._fd_to_handlers.get(fd, None)
                    if handler:
                        handler.handle_event(sock, event)
                else:
                    logging.warn('poll removed fd')
        now = time.time()
        if now - self._last_time > TIMEOUT_PRECISION:
            self._sweep_timeout()
            self._last_time = now
        if self._closed:
            # deferred close requested via close(next_tick=True)
            if self._server_socket:
                self._eventloop.remove(self._server_socket)
                self._server_socket.close()
                self._server_socket = None
                logging.info('closed listen port %d', self._listen_port)
            if not self._fd_to_handlers:
                self._eventloop.remove_handler(self._handle_events)
    def close(self, next_tick=False):
        """Stop accepting; immediate unless next_tick (then done in loop)."""
        self._closed = True
        if not next_tick:
            self._server_socket.close()
| mit |
shepdelacreme/ansible | test/units/plugins/inventory/test_inventory.py | 49 | 7477 | # Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
import textwrap
from ansible import constants as C
from units.compat import mock
from units.compat import unittest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from units.mock.path import mock_unfrackpath_noop
from ansible.inventory.manager import InventoryManager, split_host_pattern
from units.mock.loader import DictDataLoader
class TestInventory(unittest.TestCase):
    """Tests for host-pattern splitting and subscript handling."""
    # pattern string -> expected result of split_host_pattern()
    patterns = {
        'a': ['a'],
        'a, b': ['a', 'b'],
        'a , b': ['a', 'b'],
        ' a,b ,c[1:2] ': ['a', 'b', 'c[1:2]'],
        '9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9'],
        '9a01:7f8:191:7701::9,9a01:7f8:191:7701::9,foo': ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo'],
        'foo[1:2]': ['foo[1:2]'],
        'a::b': ['a::b'],
        'a:b': ['a', 'b'],
        ' a : b ': ['a', 'b'],
        'foo:bar:baz[1:2]': ['foo', 'bar', 'baz[1:2]'],
    }
    # list input -> expected output (split_host_pattern also accepts lists,
    # splitting each element in turn)
    pattern_lists = [
        [['a'], ['a']],
        [['a', 'b'], ['a', 'b']],
        [['a, b'], ['a', 'b']],
        [['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9,foo'],
         ['9a01:7f8:191:7701::9', '9a01:7f8:191:7701::9', 'foo']]
    ]
    # pattern_string: [ ('base_pattern', (a,b)), ['x','y','z'] ]
    # a,b are the bounds of the subscript; x..z are the results of the subscript
    # when applied to string.ascii_letters.
    subscripts = {
        'a': [('a', None), list(string.ascii_letters)],
        'a[0]': [('a', (0, None)), ['a']],
        'a[1]': [('a', (1, None)), ['b']],
        'a[2:3]': [('a', (2, 3)), ['c', 'd']],
        'a[-1]': [('a', (-1, None)), ['Z']],
        'a[-2]': [('a', (-2, None)), ['Y']],
        'a[48:]': [('a', (48, -1)), ['W', 'X', 'Y', 'Z']],
        'a[49:]': [('a', (49, -1)), ['X', 'Y', 'Z']],
        'a[1:]': [('a', (1, -1)), list(string.ascii_letters[1:])],
    }
    # NOTE(review): not referenced by any test visible in this chunk --
    # presumably consumed by a range-expansion test elsewhere; confirm.
    ranges_to_expand = {
        'a[1:2]': ['a1', 'a2'],
        'a[1:10:2]': ['a1', 'a3', 'a5', 'a7', 'a9'],
        'a[a:b]': ['aa', 'ab'],
        'a[a:i:3]': ['aa', 'ad', 'ag'],
        'a[a:b][c:d]': ['aac', 'aad', 'abc', 'abd'],
        'a[0:1][2:3]': ['a02', 'a03', 'a12', 'a13'],
        'a[a:b][2:3]': ['aa2', 'aa3', 'ab2', 'ab3'],
    }
    def setUp(self):
        # InventoryManager backed by an empty loader and no real sources
        fake_loader = DictDataLoader({})
        self.i = InventoryManager(loader=fake_loader, sources=[None])
    def test_split_patterns(self):
        """split_host_pattern handles both strings and lists of strings."""
        for p in self.patterns:
            r = self.patterns[p]
            self.assertEqual(r, split_host_pattern(p))
        for p, r in self.pattern_lists:
            self.assertEqual(r, split_host_pattern(p))
    def test_ranges(self):
        """_split_subscript parses bounds; _apply_subscript slices a list."""
        for s in self.subscripts:
            r = self.subscripts[s]
            self.assertEqual(r[0], self.i._split_subscript(s))
            self.assertEqual(
                r[1],
                self.i._apply_subscript(
                    list(string.ascii_letters),
                    r[0][1]
                )
            )
class TestInventoryPlugins(unittest.TestCase):
    """Tests of the INI and YAML inventory plugins via InventoryManager."""
    def test_empty_inventory(self):
        # even an empty inventory always exposes 'all' and 'ungrouped'
        inventory = self._get_inventory('')
        self.assertIn('all', inventory.groups)
        self.assertIn('ungrouped', inventory.groups)
        self.assertFalse(inventory.groups['all'].get_hosts())
        self.assertFalse(inventory.groups['ungrouped'].get_hosts())
    def test_ini(self):
        # hosts before any section header land in 'ungrouped'
        self._test_default_groups("""
            host1
            host2
            host3
            [servers]
            host3
            host4
            host5
            """)
    def test_ini_explicit_ungrouped(self):
        # naming [ungrouped] explicitly must behave the same as implicitly
        self._test_default_groups("""
            [ungrouped]
            host1
            host2
            host3
            [servers]
            host3
            host4
            host5
            """)
    def test_ini_variables_stringify(self):
        """Only string values stay strings; other literals keep their type."""
        values = ['string', 'no', 'No', 'false', 'FALSE', [], False, 0]
        inventory_content = "host1 "
        inventory_content += ' '.join(['var%s=%s' % (i, to_text(x)) for i, x in enumerate(values)])
        inventory = self._get_inventory(inventory_content)
        variables = inventory.get_host('host1').vars
        for i in range(len(values)):
            if isinstance(values[i], string_types):
                self.assertIsInstance(variables['var%s' % i], string_types)
            else:
                self.assertIsInstance(variables['var%s' % i], type(values[i]))
    @mock.patch('ansible.inventory.manager.unfrackpath', mock_unfrackpath_noop)
    @mock.patch('os.path.exists', lambda x: True)
    @mock.patch('os.access', lambda x, y: True)
    def test_yaml_inventory(self, filename="test.yaml"):
        # path checks are mocked out so the YAML "file" can live in the
        # DictDataLoader instead of on disk
        inventory_content = {filename: textwrap.dedent("""\
        ---
        all:
          hosts:
            test1:
            test2:
        """)}
        C.INVENTORY_ENABLED = ['yaml']
        fake_loader = DictDataLoader(inventory_content)
        im = InventoryManager(loader=fake_loader, sources=filename)
        self.assertTrue(im._inventory.hosts)
        self.assertIn('test1', im._inventory.hosts)
        self.assertIn('test2', im._inventory.hosts)
        self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['all'].hosts)
        self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['all'].hosts)
        self.assertEqual(len(im._inventory.groups['all'].hosts), 2)
        self.assertIn(im._inventory.get_host('test1'), im._inventory.groups['ungrouped'].hosts)
        self.assertIn(im._inventory.get_host('test2'), im._inventory.groups['ungrouped'].hosts)
        self.assertEqual(len(im._inventory.groups['ungrouped'].hosts), 2)
    def _get_inventory(self, inventory_content):
        """Build an InventoryManager from an in-memory inventory string."""
        fake_loader = DictDataLoader({__file__: inventory_content})
        return InventoryManager(loader=fake_loader, sources=[__file__])
    def _test_default_groups(self, inventory_content):
        """Shared assertions for the default all/ungrouped/servers layout."""
        inventory = self._get_inventory(inventory_content)
        self.assertIn('all', inventory.groups)
        self.assertIn('ungrouped', inventory.groups)
        all_hosts = set(host.name for host in inventory.groups['all'].get_hosts())
        self.assertEqual(set(['host1', 'host2', 'host3', 'host4', 'host5']), all_hosts)
        ungrouped_hosts = set(host.name for host in inventory.groups['ungrouped'].get_hosts())
        self.assertEqual(set(['host1', 'host2']), ungrouped_hosts)
        servers_hosts = set(host.name for host in inventory.groups['servers'].get_hosts())
        self.assertEqual(set(['host3', 'host4', 'host5']), servers_hosts)
| gpl-3.0 |
Daniel-CA/odoo | addons/product_expiry/product_expiry.py | 6 | 5716 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import openerp
from openerp import api, models
from openerp.osv import fields, osv
class stock_production_lot(osv.osv):
    # Extends lots with expiry-related dates, defaulted from the product's
    # *_time fields (old-API osv model).
    _inherit = 'stock.production.lot'
    def _get_date(dtype):
        """Return a function to compute the limit date for this type"""
        def calc_date(self, cr, uid, context=None):
            """Compute the limit date for a given date"""
            if context is None:
                context = {}
            if not context.get('product_id', False):
                date = False
            else:
                product = openerp.registry(cr.dbname)['product.product'].browse(
                    cr, uid, context['product_id'])
                duration = getattr(product, dtype)
                # set date to False when no expiry time specified on the product
                date = duration and (datetime.datetime.today()
                    + datetime.timedelta(days=duration))
            return date and date.strftime('%Y-%m-%d %H:%M:%S') or False
        return calc_date
    _columns = {
        'life_date': fields.datetime('End of Life Date',
            help='This is the date on which the goods with this Serial Number may become dangerous and must not be consumed.'),
        'use_date': fields.datetime('Best before Date',
            help='This is the date on which the goods with this Serial Number start deteriorating, without being dangerous yet.'),
        'removal_date': fields.datetime('Removal Date',
            help='This is the date on which the goods with this Serial Number should be removed from the stock.'),
        'alert_date': fields.datetime('Alert Date',
            help="This is the date on which an alert should be notified about the goods with this Serial Number."),
    }
    # Assign dates according to products data
    def create(self, cr, uid, vals, context=None):
        # propagate the product into the context so the _get_date defaults
        # below can read its expiry durations
        context = dict(context or {})
        context['product_id'] = vals.get('product_id', context.get('default_product_id') or context.get('product_id'))
        return super(stock_production_lot, self).create(cr, uid, vals, context=context)
    _defaults = {
        'life_date': _get_date('life_time'),
        'use_date': _get_date('use_time'),
        'removal_date': _get_date('removal_time'),
        'alert_date': _get_date('alert_time'),
    }
# Onchange added in new api to avoid having to change views
class StockProductionLot(models.Model):
    _inherit = 'stock.production.lot'

    @api.onchange('product_id')
    def _onchange_product(self):
        """Refresh the expiry-related dates when the product is changed."""
        tracked = ['life_date', 'use_date', 'removal_date', 'alert_date']
        fresh = self.with_context(product_id=self.product_id.id).default_get(tracked)
        for name, value in fresh.items():
            setattr(self, name, value)
class stock_quant(osv.osv):
    # Mirrors the lot's removal_date on quants and implements the FEFO
    # (First Expired, First Out) removal strategy.
    _inherit = 'stock.quant'
    def _get_quants(self, cr, uid, ids, context=None):
        """Return ids of quants holding any of the given lots (store trigger)."""
        return self.pool.get('stock.quant').search(cr, uid, [('lot_id', 'in', ids)], context=context)
    _columns = {
        # stored related field so quants can be ordered by removal_date in SQL;
        # recomputed when the quant's lot or the lot's removal_date changes
        'removal_date': fields.related('lot_id', 'removal_date', type='datetime', string='Removal Date',
            store={
                'stock.quant': (lambda self, cr, uid, ids, ctx: ids, ['lot_id'], 20),
                'stock.production.lot': (_get_quants, ['removal_date'], 20),
            }),
    }
    def apply_removal_strategy(self, cr, uid, location, product, qty, domain, removal_strategy, context=None):
        """Order candidate quants by removal_date for 'fefo'; defer otherwise."""
        if removal_strategy == 'fefo':
            order = 'removal_date, location_id, package_id, lot_id, in_date, id'
            return self._quants_get_order(cr, uid, location, product, qty, domain, order, context=context)
        return super(stock_quant, self).apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
class product_product(osv.osv):
    # NOTE(review): despite the class name, this extends product.template.
    # The *_time fields feed the lot date defaults in stock_production_lot.
    _inherit = 'product.template'
    _columns = {
        'life_time': fields.integer('Product Life Time',
            help='When a new a Serial Number is issued, this is the number of days before the goods may become dangerous and must not be consumed.'),
        'use_time': fields.integer('Product Use Time',
            help='When a new a Serial Number is issued, this is the number of days before the goods starts deteriorating, without being dangerous yet.'),
        'removal_time': fields.integer('Product Removal Time',
            help='When a new a Serial Number is issued, this is the number of days before the goods should be removed from the stock.'),
        'alert_time': fields.integer('Product Alert Time',
            help='When a new a Serial Number is issued, this is the number of days before an alert should be notified.'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
playm2mboy/edx-platform | lms/djangoapps/mobile_api/social_facebook/courses/views.py | 42 | 2384 | """
Views for courses info API
"""
from rest_framework import generics, status
from rest_framework.response import Response
from courseware.access import is_mobile_available_for_user
from student.models import CourseEnrollment
from lms.djangoapps.mobile_api.social_facebook.courses import serializers
from ...users.serializers import CourseEnrollmentSerializer
from ...utils import mobile_view
from ..utils import get_friends_from_facebook, get_linked_edx_accounts, share_with_facebook_friends
@mobile_view()
class CoursesWithFriends(generics.ListAPIView):
    """
    **Use Case**

        API endpoint for retrieving all the courses that a user's friends are in.
        Note that only friends that allow their courses to be shared will be included.

    **Example request**

        GET /api/mobile/v0.5/social/facebook/courses/friends

    **Response Values**

        See UserCourseEnrollmentsList in lms/djangoapps/mobile_api/users for the structure of the response values.
    """
    serializer_class = serializers.CoursesWithFriendsSerializer
    def list(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.GET)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Get friends from Facebook
        result = get_friends_from_facebook(serializer)
        if not isinstance(result, list):
            # a non-list is the helper's error result -- returned as-is
            # (presumably a DRF Response; confirm in ..utils)
            return result
        friends_that_are_edx_users = get_linked_edx_accounts(result)
        # Filter by sharing preferences
        users_with_sharing = [
            friend for friend in friends_that_are_edx_users if share_with_facebook_friends(friend)
        ]
        # Get unique enrollments: each query excludes course ids already
        # collected, so no course appears twice
        enrollments = []
        for friend in users_with_sharing:
            query_set = CourseEnrollment.objects.filter(
                user_id=friend['edX_id']
            ).exclude(course_id__in=[enrollment.course_id for enrollment in enrollments])
            enrollments.extend(query_set)
        # Get course objects, keeping only mobile-available courses
        courses = [
            enrollment for enrollment in enrollments if enrollment.course
            and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
        serializer = CourseEnrollmentSerializer(courses, context={'request': request}, many=True)
        return Response(serializer.data)
| agpl-3.0 |
loansindi/linux | tools/perf/scripts/python/futex-contention.py | 1997 | 1508 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread transient state, keyed by tid, populated on FUTEX_WAIT entry.
thread_thislock = {}   # tid -> futex address currently waited on
thread_blocktime = {}  # tid -> timestamp (ns) when the wait began
# Long-lived aggregates reported by trace_end().
lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
# (was initialized twice in the original; a single definition suffices)
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
			      nr, uaddr, op, val, utime, uaddr2, val3):
	# Record the moment a thread starts blocking in FUTEX_WAIT; every
	# other futex operation is irrelevant for contention measurement.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
			     nr, ret):
	# On return from the syscall, account the elapsed block time for
	# threads we saw entering FUTEX_WAIT; other exits carry no state.
	# (dict.has_key() replaced with the `in` operator: same semantics,
	# works on Python 2 and is the only spelling Python 3 accepts.)
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Python 2 print statement: perf's python scripting binding at this
	# vintage runs the script under Python 2.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Emit one summary line per (tid, lock) pair recorded by add_stats().
	for (tid, lock) in lock_waits:
		# NOTE(review): `min`/`max` shadow the builtins inside this loop
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
		      (process_names[tid], tid, lock, count, avg)
bq/linux-e60q22 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and print them as
# space-separated "index=value" pairs (index in hex), the format expected
# by the cxacru adsl_config sysfs attribute.
# NOTE(review): Python 2 only -- in Python 3 sys.stdin.read() returns str,
# which struct.unpack() rejects; it would need sys.stdin.buffer instead.
i = 0
while True:
	buf = sys.stdin.read(4)
	# EOF ends the loop; a short read means truncated/corrupt input
	if len(buf) == 0:
		break
	elif len(buf) != 4:
		sys.stdout.write("\n")
		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
		sys.exit(1)
	if i > 0:
		sys.stdout.write(" ")
	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
	i += 1
sys.stdout.write("\n")
| gpl-2.0 |
Clinical-Genomics/scout | scout/commands/export/transcript.py | 1 | 1024 | import logging
import click
from flask.cli import with_appcontext
from scout.commands.utils import builds_option
from scout.export.transcript import export_transcripts
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
@click.command("transcripts", short_help="Export transcripts")
@builds_option
@with_appcontext
def transcripts(build):
    """Export all transcripts to .bed like format"""
    LOG.info("Running scout export transcripts")
    adapter = store

    # one-line column header, tab separated like the rows below
    click.echo("#Chrom\tStart\tEnd\tTranscript\tRefSeq\tHgncID")

    row_template = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}"
    for record in export_transcripts(adapter, build):
        columns = (
            record["chrom"],
            record["start"],
            record["end"],
            record["ensembl_transcript_id"],
            record.get("refseq_id", ""),
            record["hgnc_id"],
        )
        click.echo(row_template.format(*columns))
| bsd-3-clause |
sephalon/python-ivi | ivi/agilent/agilentMSO6054A.py | 7 | 1687 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent6000 import *
class agilentMSO6054A(agilent6000):
    "Agilent InfiniiVision MSO6054A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        # Set the model id before the base initializer runs, preserving
        # any id a subclass may already have stored.
        self.__dict__.setdefault('_instrument_id', 'MSO6054A')
        super(agilentMSO6054A, self).__init__(*args, **kwargs)

        # MSO6054A hardware: 4 analog + 16 digital channels, 500 MHz.
        analog = 4
        digital = 16
        self._analog_channel_count = analog
        self._digital_channel_count = digital
        self._channel_count = analog + digital
        self._bandwidth = 500e6

        self._init_channels()
| mit |
raymondnijssen/QGIS | python/plugins/processing/algs/qgis/SetRasterStyle.py | 17 | 2813 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SetRasterStyle.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProcessingAlgorithm,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterFile,
QgsProcessingOutputRasterLayer)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class SetRasterStyle(QgisAlgorithm):
    """Processing algorithm that applies a QML style file to a raster layer."""

    INPUT = 'INPUT'
    STYLE = 'STYLE'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def group(self):
        return self.tr('Raster tools')

    def groupId(self):
        return 'rastertools'

    def flags(self):
        # FlagNoThreading keeps this on the main thread -- presumably
        # because importNamedStyle touches live layer state; confirm.
        return super().flags() | QgsProcessingAlgorithm.FlagNoThreading

    def initAlgorithm(self, config=None):
        self.addParameter(
            QgsProcessingParameterRasterLayer(self.INPUT,
                                              self.tr('Raster layer')))
        self.addParameter(
            QgsProcessingParameterFile(self.STYLE, self.tr('Style file'),
                                       extension='qml'))
        self.addOutput(QgsProcessingOutputRasterLayer(self.INPUT, self.tr('Styled')))

    def name(self):
        return 'setstyleforrasterlayer'

    def displayName(self):
        return self.tr('Set style for raster layer')

    def processAlgorithm(self, parameters, context, feedback):
        layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        style_path = self.parameterAsFile(parameters, self.STYLE, context)

        # load the QML, parse it as XML and hand it to the layer
        with open(style_path) as style_file:
            style_xml = "".join(style_file.readlines())
        document = QDomDocument()
        document.setContent(style_xml)
        layer.importNamedStyle(document)
        layer.triggerRepaint()

        return {self.INPUT: layer}
| gpl-2.0 |
dannyperry571/theapprentice | script.module.liveresolver/lib/liveresolver/modules/unpackstd.py | 40 | 3677 | #
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <einar@jsbeautifier.org>
#
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
class UnpackingError(Exception):
    """Raised for badly packed source or any other unpacking failure.

    The exception argument is a human-readable description.
    """
# NOTE(review): presumably this module's ordering/priority among the
# beautifier's unpacker plugins -- confirm against the consumer code.
PRIORITY = 1
def detect(source):
    """Detects whether `source` is P.A.C.K.E.R. coded."""
    # Whitespace between tokens is irrelevant, so compare space-free.
    compact = source.replace(' ', '')
    return compact.startswith('eval(function(p,a,c,k,e,')
def unpack(source):
    """Unpack P.A.C.K.E.R. packed js code and return the plain source."""
    payload, symtab, radix, count = _filterargs(source)
    if len(symtab) != count:
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
    try:
        unbase = Unbaser(radix)
    except TypeError:
        raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')

    def substitute(match):
        # Replace each word with its symtab entry; an empty entry means
        # the word stands for itself.
        token = match.group(0)
        return symtab[unbase(token)] or token

    decoded = re.sub(r'\b\w+\b', substitute, payload)
    return _replacestrings(decoded)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
juicers = [ (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('\|'\), *(\d+), *(.*)\)\)"),
(r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('\|'\)"),
]
for juicer in juicers:
args = re.search(juicer, source, re.DOTALL)
if args:
a = args.groups()
try:
return a[0], a[3].split('|'), int(a[1]), int(a[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
# could not find a satisfying regex
raise UnpackingError('Could not make sense of p.a.c.k.e.r data (unexpected code structure)')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
    """Callable that efficiently converts base-N strings to integers."""
    ALPHABET = {
        62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
        95: (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
             '[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
    }

    def __init__(self, base):
        self.base = base
        if 2 <= base <= 36:
            # int() handles these bases natively
            self.unbase = lambda string: int(string, base)
        else:
            # build a cipher -> value table from the known alphabets
            try:
                alphabet = self.ALPHABET[base]
            except KeyError:
                raise TypeError('Unsupported base encoding.')
            self.dictionary = dict(
                (cipher, index) for index, cipher in enumerate(alphabet))
            self.unbase = self._dictunbaser

    def __call__(self, string):
        return self.unbase(string)

    def _dictunbaser(self, string):
        """Decodes a value to an integer."""
        total = 0
        # least-significant cipher first: digit * base**position
        for power, cipher in enumerate(reversed(string)):
            total += (self.base ** power) * self.dictionary[cipher]
        return total
| gpl-2.0 |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/getopt.py | 167 | 7316 | # -*- coding: iso-8859-1 -*-
"""Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
class GetoptError(Exception):
    """Raised when the argument list cannot be parsed.

    Attributes:
        msg -- description of the failure
        opt -- the offending option, or '' when not option-specific
    """
    opt = ''
    msg = ''

    def __init__(self, msg, opt=''):
        self.opt = opt
        self.msg = msg
        Exception.__init__(self, msg, opt)

    def __str__(self):
        return self.msg
error = GetoptError # backward compatibility: alias kept for the pre-class string-exception API
def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parse `args` (typically sys.argv[1:]) against the Unix-style short
    option string `options` (a ':' after a letter means that option takes
    an argument) and the optional list `long_options` of long option
    names (without the leading '--'; a trailing '=' means the option
    takes an argument).

    Returns a pair: a list of (option, value) tuples in the order found
    (option spelled '-x' or '--name', value '' when absent), and the
    remaining argument list.  Scanning stops at the first non-option
    argument, at a bare '-', or after a '--' terminator.
    """
    parsed = []
    if type(longopts) == type(""):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while args:
        current = args[0]
        if not current.startswith('-') or current == '-':
            # first non-option (or lone '-') ends option processing
            break
        if current == '--':
            args = args[1:]
            break
        if current.startswith('--'):
            parsed, args = do_longs(parsed, current[2:], longopts, args[1:])
        else:
            parsed, args = do_shorts(parsed, current[1:], shortopts, args[1:])
    return parsed, args
def gnu_getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Like getopt(), but uses GNU scanning mode by default: options and
    non-option arguments may be intermixed.  If the option string starts
    with '+', or the POSIXLY_CORRECT environment variable is set,
    scanning stops at the first non-option argument instead.
    """
    parsed = []
    leftover = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    # '+' prefix or POSIXLY_CORRECT selects classic stop-early behaviour
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False
    while args:
        current = args[0]
        if current == '--':
            leftover += args[1:]
            break
        if current[:2] == '--':
            parsed, args = do_longs(parsed, current[2:], longopts, args[1:])
        elif current[:1] == '-':
            parsed, args = do_shorts(parsed, current[1:], shortopts, args[1:])
        elif all_options_first:
            leftover += args
            break
        else:
            leftover.append(current)
            args = args[1:]
    return parsed, leftover
def do_longs(opts, opt, longopts, args):
    """Process one long option (the text after '--') and return the
    updated (opts, remaining_args) pair."""
    # An explicit value may be attached as "name=value".
    if '=' in opt:
        opt, _, optarg = opt.partition('=')
    else:
        optarg = None

    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        if optarg is None:
            # No attached value: consume the next argument as the value.
            if not args:
                raise GetoptError('option --%s requires argument' % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg:
        raise GetoptError('option --%s must not have an argument' % opt, opt)

    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
    """Resolve a (possibly abbreviated) long option name.

    Returns a (has_arg, full_name) pair, where has_arg tells whether the
    option takes an argument (declared by a trailing '=' in longopts) and
    full_name is the canonical option name.  Raises GetoptError when the
    name is unknown or is an ambiguous prefix.
    """
    possibilities = [o for o in longopts if o.startswith(opt)]
    if not possibilities:
        raise GetoptError('option --%s not recognized' % opt, opt)

    # Exact matches win over prefix matches.
    if opt in possibilities:
        return False, opt
    if opt + '=' in possibilities:
        return True, opt

    # Otherwise the prefix must identify exactly one option.
    if len(possibilities) > 1:
        # XXX since possibilities contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError('option --%s not a unique prefix' % opt, opt)
    assert len(possibilities) == 1

    unique_match = possibilities[0]
    if unique_match.endswith('='):
        return True, unique_match[:-1]
    return False, unique_match
def do_shorts(opts, optstring, shortopts, args):
    """Process a cluster of short options (the text after '-') and return
    the updated (opts, remaining_args) pair."""
    while optstring:
        # Peel off one option letter at a time.
        opt, optstring = optstring[0], optstring[1:]
        if not short_has_arg(opt, shortopts):
            optarg = ''
        else:
            if not optstring:
                # Value is not attached ('-x val'): take the next argument.
                if not args:
                    raise GetoptError('option -%s requires argument' % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            # The remainder of the cluster is the option's value.
            optarg, optstring = optstring, ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True if short option `opt` takes an argument, i.e. it is
    followed by ':' in the `shortopts` specification."""
    for index, candidate in enumerate(shortopts):
        if candidate == opt and candidate != ':':
            return shortopts.startswith(':', index + 1)
    raise GetoptError('option -%s not recognized' % opt, opt)
if __name__ == '__main__':
    import sys
    # Smoke test: one short option taking an argument ('-a'), one short
    # flag ('-b'), and the matching long options.  Parenthesized print
    # with a single argument is valid in both Python 2 (statement) and
    # Python 3 (function call).
    print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| gpl-2.0 |
rlefevre1/hpp-rbprm-corba | src/hpp/corbaserver/rbprm/problem_solver.py | 1 | 15788 | #!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Florent Lamiraux
#
# This file is part of hpp-corbaserver.
# hpp-corbaserver is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-corbaserver is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-corbaserver. If not, see
# <http://www.gnu.org/licenses/>.
## Definition of a path planning problem
#
# This class wraps the Corba client to the server implemented by
# libhpp-corbaserver.so
#
# Some method implemented by the server can be considered as private. The
# goal of this class is to hide them and to expose those that can be
# considered as public.
class ProblemSolver (object):
    """Python facade over the hpp-corbaserver problem solver.

    Every method forwards to the CORBA server through self.client.problem
    or self.client.obstacle; no planning state is kept on the Python side
    besides the client and robot handles.
    """
    def __init__ (self, robot):
        # robot.client.basic is the basic hpp-corbaserver CORBA client.
        self.client = robot.client.basic
        self.robot = robot

    ## \name Initial and goal configurations
    # \{

    ## Set initial configuration of specified problem.
    #  \param dofArray Array of degrees of freedom
    #  \throw Error.
    def setInitialConfig (self, dofArray):
        return self.client.problem.setInitialConfig (dofArray)

    ## Get initial configuration of specified problem.
    #  \return Array of degrees of freedom
    def getInitialConfig (self):
        return self.client.problem.getInitialConfig ()

    ## Add goal configuration to specified problem.
    #  \param dofArray Array of degrees of freedom
    #  \throw Error.
    def addGoalConfig (self, dofArray):
        return self.client.problem.addGoalConfig (dofArray)

    ## Get goal configurations of specified problem.
    #  \return Array of degrees of freedom
    def getGoalConfigs (self):
        return self.client.problem.getGoalConfigs ()

    ## Reset goal configurations
    def resetGoalConfigs (self):
        return self.client.problem.resetGoalConfigs ()
    ## \}

    ## \name Obstacles
    # \{

    ## Load obstacle from urdf file
    #  \param package Name of the package containing the model,
    #  \param filename name of the urdf file in the package
    #         (without suffix .urdf)
    #  \param prefix prefix added to object names in case the same file is
    #         loaded several times
    #
    #  The ros url is built as follows:
    #  "package://${package}/urdf/${filename}.urdf"
    #
    #  The kinematic structure of the urdf file is ignored. Only the geometric
    #  objects are loaded as obstacles.
    def loadObstacleFromUrdf (self, package, filename, prefix):
        return self.client.obstacle.loadObstacleModel (package, filename,
                                                       prefix)

    ## Remove an obstacle from outer objects of a joint body
    #
    #  \param objectName name of the object to remove,
    #  \param jointName name of the joint owning the body,
    #  \param collision whether collision with object should be computed,
    #  \param distance whether distance to object should be computed.
    #  \throw Error.
    def removeObstacleFromJoint (self, objectName, jointName, collision,
                                 distance):
        return self.client.obstacle.removeObstacleFromJoint \
            (objectName, jointName, collision, distance)

    ## Move an obstacle to a given configuration.
    #  \param objectName name of the polyhedron.
    #  \param cfg the configuration of the obstacle.
    #  \throw Error.
    #
    #  \note The obstacle is not added to local map
    #  impl::Obstacle::collisionListMap.
    #
    #  \note Build the collision entity of polyhedron for KCD.
    def moveObstacle (self, objectName, cfg):
        return self.client.obstacle.moveObstacle (objectName, cfg)

    ## Get the position of an obstacle
    #
    #  \param objectName name of the polyhedron.
    #  \retval cfg Position of the obstacle.
    #  \throw Error.
    def getObstaclePosition (self, objectName):
        return self.client.obstacle.getObstaclePosition (objectName)

    ## Get list of obstacles
    #
    #  \param collision whether to return obstacle for collision,
    #  \param distance whether to return obstacles for distance computation
    #  \return list of obstacles
    def getObstacleNames (self, collision, distance):
        return self.client.obstacle.getObstacleNames (collision, distance)
    ##\}

    ## \name Constraints
    # \{

    ## Create orientation constraint between two joints
    #
    #  \param constraintName name of the constraint created,
    #  \param joint1Name name of first joint
    #  \param joint2Name name of second joint
    #  \param p quaternion representing the desired orientation
    #         of joint2 in the frame of joint1.
    #  \param mask Select which axis to be constrained.
    #  If joint1 or joint2 is "", the corresponding joint is replaced by
    #  the global frame.
    #  constraints are stored in ProblemSolver object
    def createOrientationConstraint (self, constraintName, joint1Name,
                                     joint2Name, p, mask):
        return self.client.problem.createOrientationConstraint \
            (constraintName, joint1Name, joint2Name, p, mask)

    ## Create RelativeCom constraint between two joints
    #
    #  \param constraintName name of the constraint created,
    #  \param comName name of CenterOfMassComputation
    #  \param jointLName name of joint
    #  \param point point in local frame of joint.
    #  \param mask Select axis to be constrained.
    #  If jointLName is "", the robot root joint is used.
    #  Constraints are stored in ProblemSolver object
    def createRelativeComConstraint (self, constraintName, comName, jointLName, point, mask):
        return self.client.problem.createRelativeComConstraint \
            (constraintName, comName, jointLName, point, mask)

    ## Create ComBeetweenFeet constraint between two joints
    #
    #  \param constraintName name of the constraint created,
    #  \param comName name of CenterOfMassComputation
    #  \param jointLName name of first joint
    #  \param jointRName name of second joint
    #  \param pointL point in local frame of jointL.
    #  \param pointR point in local frame of jointR.
    #  \param jointRefName name of the reference joint
    #  \param mask Select axis to be constrained.
    #  If jointRefName is "", the robot root joint is used.
    #  Constraints are stored in ProblemSolver object
    def createComBeetweenFeet (self, constraintName, comName, jointLName, jointRName,
                               pointL, pointR, jointRefName, mask):
        return self.client.problem.createComBeetweenFeet \
            (constraintName, comName, jointLName, jointRName, pointL, pointR, jointRefName, mask)

    ## Add an object to compute a partial COM of the robot.
    #  \param comName name of the partial com
    #  \param jointNames list of joint name of each tree ROOT to consider.
    #  \note Joints are added recursively, it is not possible so far to add a
    #  joint without adding all its children.
    def addPartialCom (self, comName, jointNames):
        return self.client.robot.addPartialCom (comName, jointNames);

    ## Create position constraint between two joints
    #
    #  \param constraintName name of the constraint created,
    #  \param joint1Name name of first joint
    #  \param joint2Name name of second joint
    #  \param point1 point in local frame of joint1,
    #  \param point2 point in local frame of joint2.
    #  \param mask Select which axis to be constrained.
    #  If joint1 or joint2 is "", the corresponding joint is replaced by
    #  the global frame.
    #  constraints are stored in ProblemSolver object
    def createPositionConstraint (self, constraintName, joint1Name,
                                  joint2Name, point1, point2, mask):
        return self.client.problem.createPositionConstraint \
            (constraintName, joint1Name, joint2Name, point1, point2, mask)

    ## Reset Constraints
    #
    #  Reset all constraints, including numerical constraints and locked
    #  joints
    def resetConstraints (self):
        return self.client.problem.resetConstraints ()

    ## Set numerical constraints in ConfigProjector
    #
    #  \param name name of the resulting numerical constraint obtained
    #  by stacking elementary numerical constraints,
    #  \param names list of names of the numerical constraints as
    #  inserted by method hpp::core::ProblemSolver::addNumericalConstraint.
    def setNumericalConstraints (self, name, names):
        return self.client.problem.setNumericalConstraints (name, names)

    ## Apply constraints
    #
    #  \param q initial configuration
    #  \return configuration projected in success,
    #  \throw Error if projection failed.
    def applyConstraints (self, q):
        return self.client.problem.applyConstraints (q)

    ## Create a vector of passive dofs.
    #
    #  \param name name of the vector in the ProblemSolver map.
    #  \param dofNames list of names of DOF that may
    def addPassiveDofs (self, name, dofNames):
        return self.client.problem.addPassiveDofs (name, dofNames)

    ## Generate a configuration satisfying the constraints
    #
    #  \param maxIter maximum number of tries,
    #  \return configuration projected in success,
    #  \throw Error if projection failed.
    def generateValidConfig (self, maxIter):
        return self.client.problem.generateValidConfig (maxIter)

    ## Lock joint with given joint configuration
    #  \param jointName name of the joint
    #  \param value value of the joint configuration
    def lockJoint (self, jointName, value):
        return self.client.problem.lockJoint (jointName, value)

    ## error threshold in numerical constraint resolution
    def setErrorThreshold (self, threshold):
        return self.client.problem.setErrorThreshold (threshold)

    ## Set the maximal number of iterations
    def setMaxIterations (self, iterations):
        return self.client.problem.setMaxIterations (iterations)
    ## \}

    ## \name Solve problem and get paths
    # \{

    ## Select path planner type
    #  \param Name of the path planner type, either "DiffusingPlanner",
    #  "VisibilityPrmPlanner", or any type added by method
    #  core::ProblemSolver::addPathPlannerType
    def selectPathPlanner (self, pathPlannerType):
        return self.client.problem.selectPathPlanner (pathPlannerType)

    ## Add a path optimizer
    #  \param Name of the path optimizer type, either "RandomShortcut" or
    #  any type added by core::ProblemSolver::addPathOptimizerType
    def addPathOptimizer (self, pathOptimizerType):
        return self.client.problem.addPathOptimizer (pathOptimizerType)

    ## Clear sequence of path optimizers
    #
    def clearPathOptimizers (self):
        return self.client.problem.clearPathOptimizers ()

    ## Select path validation method
    #  \param Name of the path validation method, either "Discretized"
    #  "Progressive", "Dichotomy", or any type added by
    #  core::ProblemSolver::addPathValidationType,
    #  \param tolerance maximal acceptable penetration.
    def selectPathValidation (self, pathValidationType, tolerance):
        return self.client.problem.selectPathValidation (pathValidationType,
                                                         tolerance)

    ## Select path projector method
    #  \param Name of the path projector method, either "Discretized"
    #  "Progressive", "Dichotomy", or any type added by
    #  core::ProblemSolver::addPathProjectorType,
    #  \param tolerance maximal acceptable penetration.
    def selectPathProjector (self, pathProjectorType, tolerance):
        return self.client.problem.selectPathProjector (pathProjectorType,
                                                        tolerance)

    ## Solve the problem of corresponding ChppPlanner object
    def solve (self):
        return self.client.problem.solve ()

    ## Make direct connection between two configurations
    #  \param startConfig, endConfig: the configurations to link.
    #  \throw Error if steering method fails to create a direct path of if
    #  direct path is not valid
    def directPath (self, startConfig, endConfig):
        return self.client.problem.directPath (startConfig, endConfig)

    ## Get Number of paths
    def numberPaths (self):
        return self.client.problem.numberPaths ()

    ## Optimize a given path
    #  \param inPathId Id of the path in this problem.
    #  \throw Error.
    def optimizePath(self, inPathId):
        return self.client.problem.optimizePath (inPathId)

    ## Get length of path
    #  \param inPathId rank of the path in the problem
    #  \return length of path if path exists.
    def pathLength(self, inPathId):
        return self.client.problem.pathLength(inPathId)

    ## Get the robot's config at param on the a path
    #  \param inPathId rank of the path in the problem
    #  \param atDistance : the user parameter choice
    #  \return dofseq : the config at param
    def configAtParam (self, inPathId, atDistance):
        return self.client.problem.configAtParam (inPathId, atDistance)

    ## Get way points of a path
    #  \param pathId rank of the path in the problem
    def getWaypoints (self, pathId):
        return self.client.problem.getWaypoints (pathId)

    ## \name Interruption of a path planning request
    # \{

    ## \brief Interrupt path planning activity
    #  \note this method is effective only when multi-thread policy is used
    #  by CORBA server.
    #  See constructor of class Server for details.
    def interruptPathPlanning (self):
        return self.client.problem.interruptPathPlanning ()
    # \}

    ## \name exploring the roadmap
    # \{

    ## Get nodes of the roadmap.
    def nodes(self):
        return self.client.problem.nodes ()

    # the configuration of the node nodeId
    def node(self,nodeId):
        return self.client.problem.node(nodeId)

    # the number of nodes in the roadmap
    def numberNodes(self):
        return self.client.problem.numberNodes ()

    ## Number of edges
    def numberEdges (self):
        return self.client.problem.numberEdges ()

    ## Edge at given rank
    def edge (self, edgeId):
        return self.client.problem.edge (edgeId)

    ## Number of connected components
    def numberConnectedComponents (self):
        return self.client.problem.numberConnectedComponents ()

    ## Nodes of a connected component
    #  \param connectedComponentId index of connected component in roadmap
    #  \return list of nodes of the connected component.
    def nodesConnectedComponent (self, ccId):
        return self.client.problem.nodesConnectedComponent (ccId)

    ## Clear the roadmap
    def clearRoadmap (self):
        return self.client.problem.clearRoadmap ()
    ## \}

    ## Select steering method type
    #  \param Name of the steering method type, either
    #  "SteeringMethodStraight" or any type added by method
    #  core::ProblemSolver::addSteeringMethodType
    def selectSteeringMethod (self, steeringMethodType):
        return self.client.problem.selectSteeringMethod (steeringMethodType)

    ## Select distance type
    #  \param Name of the distance type, either
    #  "WeighedDistance" or any type added by method
    #  core::ProblemSolver::addDistanceType
    def selectDistance (self, distanceType):
        return self.client.problem.selectDistance (distanceType)
| lgpl-3.0 |
mpeuster/estate | experiments/scaleability-dynamic/pox/pox/web/webcore.py | 40 | 15724 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Webcore is a basic web server framework based on the SocketServer-based
BaseHTTPServer that comes with Python. The big difference is that this
one can carve up URL-space by prefix, such that "/foo/*" gets handled by
a different request handler than "/bar/*". I refer to this as "splitting".
You should also be able to make a request handler written without splitting
run under Webcore. This may not work for all request handlers, but it
definitely works for some. :) The easiest way to do this is with the
wrapRequestHandler() function, like so:
from CGIHTTPServer import CGIHTTPRequestHandler as CHRH
core.WebServer.set_handler("/foo", wrapRequestHandler(CHRH))
.. now URLs under the /foo/ directory will let you browse through the
filesystem next to pox.py. If you create a cgi-bin directory next to
pox.py, you'll be able to run executables in it.
For this specific purpose, there's actually a SplitCGIRequestHandler
which demonstrates wrapping a normal request handler while also
customizing it a bit -- SplitCGIRequestHandler shoehorns in functionality
to use arbitrary base paths.
BaseHTTPServer is not very fast and needs to run on its own thread.
It'd actually be great to have a version of this written against, say,
CherryPy, but I did want to include a simple, dependency-free web solution.
"""
from SocketServer import ThreadingMixIn
from BaseHTTPServer import *
from time import sleep
import select
import threading
import random
import hashlib
import base64
from pox.core import core
import os
import posixpath
import urllib
import cgi
import errno
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = core.getLogger()

try:
  weblog = log.getChild("server")
except AttributeError:
  # Logger.getChild() only exists starting with Python 2.7; on older
  # versions (e.g. 2.6) fall back to fetching the child logger by its
  # full dotted name.  Narrowed from a bare except: a missing method
  # raises AttributeError, and a bare except would also swallow
  # KeyboardInterrupt/SystemExit.
  #TODO: Remove this someday.
  weblog = core.getLogger("webcore.server")
def _setAttribs (parent, child):
attrs = ['command', 'request_version', 'close_connection',
'raw_requestline', 'requestline', 'path', 'headers', 'wfile',
'rfile', 'server', 'client_address']
for a in attrs:
setattr(child, a, getattr(parent, a))
setattr(child, 'parent', parent)
import SimpleHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class SplitRequestHandler (BaseHTTPRequestHandler):
  """
  Base class for POX web handlers.

  Inherit from this class instead of BaseHTTPRequestHandler when writing
  HTTP handlers for POX.  The interface is the same -- the usual request
  attributes are available and the usual do_GET()/do_POST()/etc. methods
  are invoked.

  In addition, self.args holds whatever was given to set_handler() on the
  server, and self.prefix is the URL prefix this handler is mounted at.
  """
  # Also a StreamRequestHandler

  def __init__ (self, parent, prefix, args):
    _setAttribs(parent, self)
    self.parent = parent
    self.prefix = prefix
    self.args = args
    self._init()

  def _init (self):
    """
    Subclass hook called at the end of __init__ -- override it to, for
    example, parse .args.
    """
    pass

  def handle_one_request (self):
    raise RuntimeError("Not supported")

  def handle(self):
    raise RuntimeError("Not supported")

  def _split_dispatch (self, command, handler = None):
    # Dispatch to a do_<METHOD> method.  Note that the method name is
    # derived from self.command (set while the request was parsed), not
    # from the `command` parameter.
    target = handler if handler is not None else self
    mname = 'do_' + self.command
    if not hasattr(target, mname):
      self.send_error(501, "Unsupported method (%r)" % self.command)
      return
    return getattr(target, mname)()

  def log_request (self, code = '-', size = '-'):
    msg = ':"%s" %s %s' % (self.requestline, str(code), str(size))
    weblog.debug(self.prefix + msg)

  def log_error (self, fmt, *args):
    weblog.error("%s:%s" % (self.prefix, fmt % args))

  def log_message (self, fmt, *args):
    weblog.info("%s:%s" % (self.prefix, fmt % args))
# 16x16 POX favicon: a GIF image stored as a hex string (two hex digits
# per byte) so it can live directly in the source.
_favicon = ("47494638396110001000c206006a5797927bc18f83ada9a1bfb49ceabda"
  + "4f4ffffffffffff21f904010a0007002c000000001000100000034578badcfe30b20"
  + "1c038d4e27a0f2004e081e2172a4051942abba260309ea6b805ab501581ae3129d90"
  + "1275c6404b80a72f5abcd4a2454cb334dbd9e58e74693b97425e07002003b")
# Decode each hex-digit pair into one byte.  Python 2 only: chr()/''.join
# build a byte string here (and xrange is used); a Python 3 port would
# need bytes and range instead.
_favicon = ''.join([chr(int(_favicon[n:n+2],16))
                    for n in xrange(0,len(_favicon),2)])
class CoreHandler (SplitRequestHandler):
  """
  A default page to say hi from POX.

  Serves "/" with an HTML summary of registered components and web
  prefixes, and "/favicon.*" with the embedded GIF above.
  """

  def do_GET (self):
    """Serve a GET request."""
    self.do_content(True)

  def do_HEAD (self):
    """Serve a HEAD request (headers only, no body)."""
    self.do_content(False)

  def do_content (self, is_get):
    # is_get: True to send the body, False for a HEAD-style response.
    if self.path == "/":
      self.send_info(is_get)
    elif self.path.startswith("/favicon."):
      self.send_favicon(is_get)
    else:
      self.send_error(404, "File not found on CoreHandler")

  def send_favicon (self, is_get = False):
    # Send the embedded GIF (body included only for GET).
    self.send_response(200)
    self.send_header("Content-type", "image/gif")
    self.send_header("Content-Length", str(len(_favicon)))
    self.end_headers()
    if is_get:
      self.wfile.write(_favicon)

  def send_info (self, is_get = False):
    # Build the info page: registered core components...
    r = "<html><head><title>POX</title></head>\n"
    r += "<body>\n<h1>POX Webserver</h1>\n<h2>Components</h2>\n"
    r += "<ul>"
    for k in sorted(core.components):
      v = core.components[k]
      r += "<li>%s - %s</li>\n" % (cgi.escape(str(k)), cgi.escape(str(v)))
    r += "</ul>\n\n<h2>Web Prefixes</h2>"
    r += "<ul>"
    # ...and the registered URL prefixes.  self.args is the server itself
    # (passed via set_handler in launch()); each match is a
    # (prefix, handler, trim_prefix, args) tuple.
    m = [map(cgi.escape, map(str, [x[0],x[1],x[3]]))
         for x in self.args.matches]
    m.sort()
    for v in m:
      r += "<li><a href='{0}'>{0}</a> - {1} {2}</li>\n".format(*v)
    r += "</ul></body></html>\n"

    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.send_header("Content-Length", str(len(r)))
    self.end_headers()
    if is_get:
      self.wfile.write(r)
class StaticContentHandler (SplitRequestHandler, SimpleHTTPRequestHandler):
  # This is Python's SimpleHTTPRequestHandler, modified to serve from
  # given directories and to inherit from SplitRequestHandler.
  """
  A SplitRequestHandler for serving static content

  This is largely the same as the Python SimpleHTTPRequestHandler, but
  we modify it to serve from arbitrary directories at arbitrary
  positions in the URL space.
  """

  server_version = "StaticContentHandler/1.0"

  def send_head (self):
    # We override this and handle the directory redirection case because
    # we want to include the per-split prefix.
    path = self.translate_path(self.path)
    if os.path.isdir(path):
      if not self.path.endswith('/'):
        self.send_response(301)
        self.send_header("Location", self.prefix + self.path + "/")
        self.end_headers()
        return None
    return SimpleHTTPRequestHandler.send_head(self)

  def list_directory (self, dirpath):
    """Generate an HTML directory listing; returns a file-like object
    positioned at the start, or None after sending an error."""
    # dirpath is an OS path
    try:
      d = os.listdir(dirpath)
    except OSError as e:
      # Map the specific failure to an appropriate HTTP status.
      if e.errno == errno.EACCES:
        self.send_error(403, "This directory is not listable")
      elif e.errno == errno.ENOENT:
        self.send_error(404, "This directory does not exist")
      else:
        self.send_error(400, "Unknown error")
      return None
    d.sort(key=str.lower)
    r = StringIO()
    r.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
    path = posixpath.join(self.prefix, cgi.escape(self.path).lstrip("/"))
    r.write("<html><head><title>" + path + "</title></head>\n")
    r.write("<body><pre>")
    # Breadcrumb links: one anchor per path component.
    parts = path.rstrip("/").split("/")
    r.write('<a href="/">/</a>')
    for i,part in enumerate(parts):
      link = urllib.quote("/".join(parts[:i+1]))
      if i > 0: part += "/"
      r.write('<a href="%s">%s</a>' % (link, cgi.escape(part)))
    r.write("\n" + "-" * (0+len(path)) + "\n")
    # Hidden entries are skipped; directories are listed before files.
    dirs = []
    files = []
    for f in d:
      if f.startswith("."): continue
      if os.path.isdir(os.path.join(dirpath, f)):
        dirs.append(f)
      else:
        files.append(f)
    def entry (n, rest=''):
      link = urllib.quote(n)
      name = cgi.escape(n)
      r.write('<a href="%s">%s</a>\n' % (link,name+rest))
    for f in dirs:
      entry(f, "/")
    for f in files:
      entry(f)
    r.write("</pre></body></html>")
    r.seek(0)
    self.send_response(200)
    self.send_header("Content-Type", "text/html")
    self.send_header("Content-Length", str(len(r.getvalue())))
    self.end_headers()
    return r

  def translate_path (self, path, include_prefix = True):
    """
    Translate a web-path to a local filesystem path

    Odd path elements (e.g., ones that contain local filesystem path
    separators) are stripped.
    """
    def fixpath (p):
      # Split p into components right-to-left, dropping empty/unsafe
      # pieces, and return the sanitized components in forward order.
      o = []
      skip = 0
      while True:
        p,tail = posixpath.split(p)
        if p in ('/','') and tail == '': break
        if tail in ('','.', os.path.curdir, os.path.pardir): continue
        if os.path.sep in tail: continue
        if os.path.altsep and os.path.altsep in tail: continue
        if os.path.splitdrive(tail)[0] != '': continue
        # NOTE(review): on POSIX os.path.pardir == '..', so the filter
        # above already discards '..' and this skip-counting branch looks
        # unreachable there -- verify intent before relying on it.
        if tail == '..':
          skip += 1
          continue
        if skip:
          skip -= 1
          continue
        o.append(tail)
      o.reverse()
      return o

    # Remove query string / fragment
    if "?" in path: path = path[:path.index("?")]
    if "#" in path: path = path[:path.index("#")]
    path = fixpath(path)
    if path:
      path = os.path.join(*path)
    else:
      path = ''
    if include_prefix:
      # Anchor the result under this handler's configured root directory.
      path = os.path.join(os.path.abspath(self.args['root']), path)
    return path
def wrapRequestHandler (handlerClass):
  """
  Build a SplitRequestHandler subclass from a stock request handler.

  The returned type dispatches through SplitRequestHandler while reusing
  the do_GET()/do_POST()/etc. implementations of handlerClass.
  """
  name = "Split" + handlerClass.__name__
  bases = (SplitRequestHandler, handlerClass, object)
  return type(name, bases, {})
from CGIHTTPServer import CGIHTTPRequestHandler
class SplitCGIRequestHandler (SplitRequestHandler,
                              CGIHTTPRequestHandler, object):
  """
  Runs CGIRequestHandler serving from an arbitrary path.
  This really should be a feature of CGIRequestHandler and the way of
  implementing it here is scary and awful, but it at least sort of works.
  """
  # The current working directory is process-global, so all CGI requests
  # are serialized under one class-level lock while we temporarily
  # chdir() into the configured CGI root (self.args).
  __lock = threading.Lock()
  def _split_dispatch (self, command):
    with self.__lock:
      olddir = os.getcwd()
      try:
        os.chdir(self.args)
        return SplitRequestHandler._split_dispatch(self, command)
      finally:
        # Always restore the original working directory, even on error.
        os.chdir(olddir)
class SplitterRequestHandler (BaseHTTPRequestHandler):
  """Top-level handler that routes each request to the sub-handler whose
  URL prefix matches, per the server's `matches` table."""

  def __init__ (self, *args, **kw):
    #self.rec = Recording(args[0])
    #self.args = args
    #self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
    #BaseHTTPRequestHandler.__init__(self, self.rec, *args[1:], **kw)
    BaseHTTPRequestHandler.__init__(self, *args, **kw)

  def log_request (self, code = '-', size = '-'):
    weblog.debug('splitter:"%s" %s %s',
                 self.requestline, str(code), str(size))

  def log_error (self, fmt, *args):
    weblog.error('splitter:' + fmt % args)

  def log_message (self, fmt, *args):
    weblog.info('splitter:' + fmt % args)

  def handle_one_request(self):
    self.raw_requestline = self.rfile.readline()
    if not self.raw_requestline:
      self.close_connection = 1
      return
    if not self.parse_request(): # An error code has been sent, just exit
      return

    handler = None

    # matches is kept sorted longest-prefix-first by the server, so the
    # first hit is the most specific one.
    while True:
      for m in self.server.matches:
        if self.path.startswith(m[0]):
          #print m,self.path
          # m is (prefix, handler_class, trim_prefix, args).
          handler = m[1](self, m[0], m[3])
          #pb = self.rec.getPlayback()
          #handler = m[1](pb, *self.args[1:])
          _setAttribs(self, handler)
          if m[2]:
            # Trim. Behavior is not "perfect"
            handler.path = self.path[len(m[0]):]
            if m[0].endswith('/'):
              handler.path = '/' + handler.path
          break

      if handler is None:
        # No prefix matched: handle it ourselves, redirecting paths that
        # lack a trailing slash (splits behave like directories).
        handler = self
        if not self.path.endswith('/'):
          # Handle splits like directories
          self.send_response(301)
          self.send_header("Location", self.path + "/")
          self.end_headers()
          break
      break

    return handler._split_dispatch(self.command)
class SplitThreadedServer(ThreadingMixIn, HTTPServer):
  """Threaded HTTP server that carves up URL space by prefix."""

  matches = [] # Tuples of (prefix, handler, trim_prefix, args)

#  def __init__ (self, *args, **kw):
#    BaseHTTPRequestHandler.__init__(self, *args, **kw)
#    self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)

  def set_handler (self, prefix, handler, args = None, trim_prefix = True):
    """Mount `handler` (a SplitRequestHandler subclass) at `prefix`,
    replacing any handler previously mounted there; handler=None just
    unmounts the prefix."""
    # Not very efficient
    assert (handler is None) or (issubclass(handler, SplitRequestHandler))
    self.matches = [m for m in self.matches if m[0] != prefix]
    if handler is None: return
    self.matches.append((prefix, handler, trim_prefix, args))
    # Longest prefix first, so the most specific mount wins dispatch.
    self.matches.sort(key=lambda e:len(e[0]),reverse=True)

  def add_static_dir (self, www_path, local_path=None, relative=False):
    """
    Serves a directory of static content.
    www_path is the prefix of the URL that maps to this directory.
    local_path is the directory to serve content from.  If it's not
    specified, it is assume to be a directory with the same name as
    www_path.
    relative, if True, means that the local path is to be a sibling
    of the calling module.
    For an example, see the launch() function in this module.
    """
    if not www_path.startswith('/'): www_path = '/' + www_path

    if local_path is None:
      local_path = www_path[1:]
      if relative:
        local_path = os.path.basename(local_path)
    if relative:
      # Resolve relative to the *caller's* source file.  This reads one
      # frame up the stack, so add_static_dir must be called directly
      # (no intermediate wrapper frames).
      import inspect
      path = inspect.stack()[1][1]
      path = os.path.dirname(path)
      local_path = os.path.join(path, local_path)

    local_path = os.path.abspath(local_path)

    log.debug("Serving %s at %s", local_path, www_path)

    self.set_handler(www_path, StaticContentHandler,
                     {'root':local_path}, True);
def launch (address='', port=8000, static=False):
  """
  Start the POX web server component and register it as core.WebServer.

  static may be False (no static content), True (serve the package's
  www_root directory under /static), or a comma-separated string of
  "[prefix:]directory" entries; an empty entry means the default
  static dir, and a bare directory gets its last path component as
  the prefix.
  """
  httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler)
  core.register("WebServer", httpd)
  # Mount the default info page at the root; pass the server itself as
  # args so CoreHandler can list the registered prefixes.
  httpd.set_handler("/", CoreHandler, httpd, True)
  #httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
  #httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
  #httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")
  if static is True:
    httpd.add_static_dir('static', 'www_root', relative=True)
  elif static is False:
    pass
  else:
    # Parse the comma-separated "[prefix:]directory" specification.
    static = static.split(",")
    for entry in static:
      if entry.lower() == "":
        httpd.add_static_dir('static', 'www_root', relative=True)
        continue
      if ':' not in entry:
        # No explicit prefix: derive it from the directory's last
        # non-empty path component.
        directory = entry
        prefix = os.path.split(directory)
        if prefix[1] == '':
          prefix = os.path.split(prefix[0])
        prefix = prefix[1]
        assert prefix != ''
      else:
        prefix,directory = entry.split(":")
      directory = os.path.expanduser(directory)
      httpd.add_static_dir(prefix, directory, relative=False)

  def run ():
    # Serve until the process exits; errors are deliberately swallowed
    # so a dying server doesn't take POX down with it.
    try:
      log.debug("Listening on %s:%i" % httpd.socket.getsockname())
      httpd.serve_forever()
    except:
      pass
    log.info("Server quit")

  # Run the (blocking) BaseHTTPServer loop on its own daemon thread.
  thread = threading.Thread(target=run)
  thread.daemon = True
  thread.start()
| apache-2.0 |
tzaffi/git-in-practice-repo | book/lib/python2.7/site-packages/django/test/signals.py | 222 | 2719 | import os
import time
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.utils import timezone
from django.utils.functional import empty
# Emitted once per template render, carrying the template and the context
# it was rendered with (receivers elsewhere decide what to record).
template_rendered = Signal(providing_args=["template", "context"])
# Emitted whenever a setting's value changes at runtime; the receivers in
# this module use it to invalidate setting-derived caches.
setting_changed = Signal(providing_args=["setting", "value"])

# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    """Keep process and database time zones in sync with a changed setting.

    A TIME_ZONE change updates the process time zone (via the TZ
    environment variable) and the local-time cache; a TIME_ZONE or USE_TZ
    change updates the time zone used by every database connection.
    """
    if kwargs['setting'] == 'TIME_ZONE':
        # Reset process time zone
        if hasattr(time, 'tzset'):
            # time.tzset() is not available on every platform (hence the
            # hasattr guard); it re-reads TZ from the environment.
            if kwargs['value']:
                os.environ['TZ'] = kwargs['value']
            else:
                os.environ.pop('TZ', None)
            time.tzset()

        # Reset local time zone cache
        timezone._localtime = None

    # Reset the database connections' time zone
    if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
        USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
    elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
        USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
    else:
        # no need to change the database connections' time zones
        return
    # With USE_TZ enabled, connections operate in UTC; otherwise in the
    # effective TIME_ZONE.
    tz = 'UTC' if USE_TZ else TIME_ZONE
    for conn in connections.all():
        conn.settings_dict['TIME_ZONE'] = tz
        tz_sql = conn.ops.set_time_zone_sql()
        if tz_sql:
            conn.cursor().execute(tz_sql, [tz])
@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
    """Drop the cached context processors when the relevant setting changes."""
    if kwargs['setting'] != 'TEMPLATE_CONTEXT_PROCESSORS':
        return
    from django.template import context
    context._standard_context_processors = None
@receiver(setting_changed)
def clear_template_loaders_cache(**kwargs):
    """Drop the cached template source loaders when TEMPLATE_LOADERS changes."""
    if kwargs['setting'] != 'TEMPLATE_LOADERS':
        return
    from django.template import loader
    loader.template_source_loaders = None
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
    """Drop the cached serializer registry when SERIALIZATION_MODULES changes."""
    if kwargs['setting'] != 'SERIALIZATION_MODULES':
        return
    from django.core import serializers
    serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
    """Reset cached translation state when locale-related settings change."""
    setting = kwargs['setting']
    if setting not in ('LOCALE_PATHS', 'LANGUAGE_CODE'):
        return
    from django.utils.translation import trans_real
    trans_real._default = None
    if setting == 'LOCALE_PATHS':
        # Translation catalogs depend on the search paths, so flush them too.
        trans_real._translations = {}
@receiver(setting_changed)
def file_storage_changed(**kwargs):
    """Force re-creation of the lazy default storage backend when its
    configuration changes."""
    if kwargs['setting'] not in ('MEDIA_ROOT', 'DEFAULT_FILE_STORAGE'):
        return
    from django.core.files.storage import default_storage
    default_storage._wrapped = empty
| mit |
ExcaliburZero/blackjack | tests/test_deck.py | 1 | 2215 | """These are tests of the Deck class."""
import unittest
from blackjack import Deck
from blackjack import Card
from blackjack import InvalidDeckSize
from blackjack import InvalidDeckDraw
class TestDeck(unittest.TestCase):
    """A class which defines the various tests of the Deck class."""

    def test_deck_size(self):
        """
        A test which checks to make sure that the deck is created with the
        correct number of cards (52 per pack).
        """
        # Test valid numbers of packs of cards
        valid_packs = [
            1,
            2,
            3,
        ]
        for packs in valid_packs:
            deck = Deck(packs)
            self.assertEqual(len(deck), 52 * packs, msg="Incorrect number of cards " + \
                str(len(deck)) + " in deck of " + str(packs) + " packs.")
        # Test invalid numbers of packs of cards. Using assertRaises instead
        # of a hand-rolled try/except success flag: it fails with a clear
        # message and does not mask unrelated exception types.
        invalid_packs = [
            0,
            -1,
        ]
        for packs in invalid_packs:
            with self.assertRaises(InvalidDeckSize):
                Deck(packs)

    def test_deck_draw(self):
        """A test which checks the draw method of the deck class."""
        deck = Deck(1)
        self.assertEqual(len(deck), 52, msg="Incorrect number of inital cards in deck.")
        card = deck.draw()
        self.assertEqual(len(deck), 51, msg="Incorrect number of cards in deck after draw.")
        # Make sure that the card object drawn is actually a card
        self.assertIsInstance(card, Card, msg="The card returned by the draw method of " + \
            "Deck is not a Card object.")
        # Make sure that all cards can be drawn
        deck = Deck(1)
        for _ in range(52):
            deck.draw()
        self.assertEqual(len(deck), 0)
        # Drawing from an empty deck must raise InvalidDeckDraw
        with self.assertRaises(InvalidDeckDraw):
            deck.draw()
| mit |
kavi112/google-python-exercises | logpuzzle/logpuzzle.py | 147 | 1564 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
    """Returns a list of the puzzle urls from the given log file,
    extracting the hostname from the filename itself.
    Screens out duplicate urls and returns the urls sorted into
    increasing order."""
    # +++your code here+++
    # NOTE: exercise stub -- intentionally unimplemented; as written it
    # returns None, so main() will fail until this is filled in.
def download_images(img_urls, dest_dir):
    """Given the urls already in the correct order, downloads
    each image into the given directory.
    Gives the images local filenames img0, img1, and so on.
    Creates an index.html in the directory
    with an img tag to show each local image file.
    Creates the directory if necessary.
    """
    # +++your code here+++
    # NOTE: exercise stub -- intentionally unimplemented.
def main():
args = sys.argv[1:]
if not args:
print 'usage: [--todir dir] logfile '
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
if todir:
download_images(img_urls, todir)
else:
print '\n'.join(img_urls)
if __name__ == '__main__':
main()
| apache-2.0 |
dkerwin/django-pki | pki/openssl.py | 1 | 18269 | import os
import re
import string
import random
from subprocess import Popen, PIPE, STDOUT
from shutil import rmtree
from logging import getLogger
from django.template.loader import render_to_string
import pki.models
from pki.helper import subject_for_object
from pki.settings import PKI_OPENSSL_BIN, PKI_OPENSSL_CONF, PKI_DIR, PKI_OPENSSL_TEMPLATE, \
PKI_SELF_SIGNED_SERIAL, PKI_CA_NAME_BLACKLIST
try:
    # hashlib is available in python-2.5 and greater
    from hashlib import md5 as md5_constructor
except ImportError:
    # compatibility fallback for older interpreters shipping the md5 module
    from md5 import new as md5_constructor

# Module-wide logger; the "pki" logger is configured by the host project.
logger = getLogger("pki")
def refresh_pki_metadata(ca_list):
    """Refresh pki metadata (PKI storage directories and openssl configuration files)

    Creates the per-CA directory trees (certs/, private/, crl/ plus serial,
    crlnumber and index.txt bookkeeping files), purges directories of CAs no
    longer in ca_list, and re-renders the shared openssl.conf.

    Each ca_list element is a dictionary:
    'name': CA name
    """
    # refresh directory structure; mode per sub-directory (py2 octal literals)
    dirs = { 'certs' : 0755,
             'private': 0700,
             'crl' : 0755,
           }
    try:
        # create base PKI directory if necessary
        if not os.path.exists(PKI_DIR):
            logger.info('Creating base PKI directory %s' % PKI_DIR)
            os.mkdir(PKI_DIR, 0700)
        # list of old CA directories for possible purging
        purge_dirs = set([os.path.join(PKI_DIR, d) for d in os.listdir(PKI_DIR)
                          if os.path.isdir(os.path.join(PKI_DIR, d))])
        # loop over CAs and create necessary filesystem objects
        for ca in ca_list:
            ca_dir = os.path.join(PKI_DIR, ca.name)
            # create CA directory (and its bookkeeping files) only for new CAs;
            # existing CAs must keep their serial/crlnumber state untouched
            if not ca_dir in purge_dirs:
                logger.info("Creating base directory for new CA %s" % ca.name)
                os.mkdir(ca_dir)
                # create nested directories for key storage with proper permissions
                for d, m in dirs.items():
                    os.mkdir(os.path.join(ca_dir, d), m)
                initial_serial = 0x01
                try:
                    # self-signed roots may start at a configured serial offset
                    if not ca.parent and int(PKI_SELF_SIGNED_SERIAL) > 0:
                        initial_serial = PKI_SELF_SIGNED_SERIAL+1
                except ValueError:
                    logger.error( "PKI_SELF_SIGNED_SERIAL failed conversion to int!" )
                h2s = '%X' % initial_serial
                # openssl expects an even number of hex digits in the serial file
                if len(h2s) % 2 == 1:
                    h2s = '0' + h2s
                # initialize certificate serial number
                s = open(os.path.join(ca_dir, 'serial'), 'wb')
                s.write(h2s)
                s.close()
                logger.info("Initial serial number set to %s" % h2s)
                # initialize CRL serial number
                s = open(os.path.join(ca_dir, 'crlnumber'), 'wb')
                s.write('01')
                s.close()
                # touch certificate index file
                open(os.path.join(ca_dir, 'index.txt'), 'wb').close()
            # do not delete existing CA dir
            purge_dirs.discard(ca_dir)
        # purge unused CA directories
        for d in purge_dirs:
            if os.path.isdir(d):
                # extra check in order to keep unrelated directory from recursive removal...
                # (in case if something wrong with paths)
                # probably can be removed when debugging will be finished
                if os.path.isfile(os.path.join(d, 'crlnumber')):
                    logger.debug("Purging CA directory tree %s" % d)
                    rmtree(d)
                else:
                    logger.warning('Directory %s does not contain any metadata, preserving it' % d)
        # annotate each x509 extension with a ca flag for the template
        x509_list =[]
        for x509 in pki.models.x509Extension.objects.all():
            if x509.is_ca():
                x509.ca = True
            else:
                x509.ca = False
            x509_list.append(x509)
        # render template and save result to openssl.conf
        conf = render_to_string(PKI_OPENSSL_TEMPLATE, {'ca_list': ca_list, 'x509_extensions': x509_list,})
        f = open(PKI_OPENSSL_CONF, 'wb')
        f.write(conf)
        f.close()
    except Exception, e:
        logger.exception("Refreshing PKI metadata failed: %s" % e)
    # NOTE(review): this success message is logged even when the except path
    # above ran -- confirm whether that is intended.
    logger.info("Successfully finished PKI metadata refresh")
class Openssl():
"""OpenSSL command and task wrapper class
instance must be a CertificateAuthority or Certificate object.
"""
def __init__(self, instance):
"""Initialize shared varaibles and verify instance type"""
self.i = instance
self.subj = subject_for_object(self.i)
if self.i.name in PKI_CA_NAME_BLACKLIST:
logger.error("Instance name '%s' is blacklisted!" % self.i.name)
raise
if self.i.parent != None:
self.parent_certs = os.path.join(PKI_DIR, self.i.parent.name, 'certs')
self.crl = os.path.join(PKI_DIR, self.i.parent.name, 'crl', '%s.crl.pem' % self.i.parent.name)
else:
self.parent_certs = os.path.join(PKI_DIR, self.i.name, 'certs')
self.crl = os.path.join(PKI_DIR, self.i.name, 'crl', '%s.crl.pem' % self.i.name)
if isinstance(instance, pki.models.CertificateAuthority):
self.ca_dir = os.path.join(PKI_DIR, self.i.name)
self.key = os.path.join(self.ca_dir, 'private', '%s.key.pem' % self.i.name)
self.pkcs12 = False
self.i.subjaltname = ''
elif isinstance(instance, pki.models.Certificate):
if self.i.parent:
self.ca_dir = os.path.join(PKI_DIR, self.i.parent.name)
else:
self.ca_dir = os.path.join(PKI_DIR, "_SELF_SIGNED_CERTIFICATES")
if not os.path.exists(self.ca_dir):
try:
os.mkdir(self.ca_dir, 0755)
os.mkdir(os.path.join(self.ca_dir, "certs"))
except OSError, e:
logger.exception("Failed to create directories for self-signed certificates %s" % self.ca_dir)
raise
self.key = os.path.join(self.ca_dir, 'certs', '%s.key.pem' % self.i.name)
self.pkcs12 = os.path.join(self.ca_dir, 'certs', '%s.cert.p12' % self.i.name)
if not self.i.subjaltname:
self.i.subjaltname = 'email:copy'
else:
raise Exception( "Given object type is unknown!" )
if not self.i.crl_dpoints:
self.i.crl_dpoints = ''
self.csr = os.path.join(self.ca_dir, 'certs', '%s.csr.pem' % self.i.name)
self.crt = os.path.join(self.ca_dir, 'certs', '%s.cert.pem' % self.i.name)
self.der = os.path.join(self.ca_dir, 'certs', '%s.cert.der' % self.i.name)
## Generate a random string as ENV variable name
self.env_pw = "".join(random.sample(string.letters+string.digits, 10))
def exec_openssl(self, command, env_vars=None):
"""Run a openssl command.
command is prefixed with openssl binary from PKI_OPENSSL_BIN
env_vars is a dict containing the set environment variables
"""
c = [PKI_OPENSSL_BIN]
c.extend(command)
# add PKI_DIR environment variable if caller did not set it
if env_vars:
env_vars.setdefault('PKI_DIR', PKI_DIR)
else:
env_vars = { 'PKI_DIR': PKI_DIR }
proc = Popen( c, shell=False, env=env_vars, stdin=PIPE, stdout=PIPE, stderr=STDOUT )
stdout_value, stderr_value = proc.communicate()
if proc.returncode != 0:
logger.error( 'openssl command "%s" failed with returncode %d' % (c[1], proc.returncode) )
logger.error( stdout_value )
raise Exception( stdout_value )
else:
return stdout_value
def generate_key(self):
"""RSA key generation.
Key will be encrypted with des3 if passphrase is given.
"""
key_type = po = pf = ''
if self.i.passphrase:
key_type = '-des3'
po = '-passout'
pf = 'env:%s' % self.env_pw
command = 'genrsa %s -out %s %s %s %s' % (key_type, self.key, po, pf, self.i.key_length)
self.exec_openssl(command.split(), env_vars={ self.env_pw: str(self.i.passphrase) } )
logger.debug("Finished %s bit private key generation" % self.i.key_length)
def generate_self_signed_cert(self):
"""Generate a self signed root certificate.
Serial is set to user specified value when PKI_SELF_SIGNED_SERIAL > 0
"""
logger.info("Generating new self-signed certificate (CN=%s, x509 extension=%s)" % (self.i.common_name, self.i.extension))
command = ['req', '-config', PKI_OPENSSL_CONF, '-verbose', '-batch', '-new', '-x509', '-subj', self.subj, '-days', str(self.i.valid_days), \
'-extensions', str(self.i.extension), '-key', self.key, '-out', self.crt, '-passin', 'env:%s' % self.env_pw]
try:
if PKI_SELF_SIGNED_SERIAL and int(PKI_SELF_SIGNED_SERIAL) > 0:
command.extend( [ '-set_serial', str(PKI_SELF_SIGNED_SERIAL) ] )
except ValueError, e:
logger.error( "Not setting inital serial number to %s. Fallback to random number" % PKI_SELF_SIGNED_SERIAL )
logger.error( e )
env = { self.env_pw: str(self.i.passphrase), "S_A_N": self.i.subjaltname, "C_D_P": self.i.crl_dpoints }
self.exec_openssl( command, env_vars=env )
logger.info("Finished self-signed certificate creation")
def generate_csr(self):
"""CSR (Certificate Signing Request) generation"""
logger.info("Generating new CSR for %s" % self.i.common_name )
command = ['req', '-config', PKI_OPENSSL_CONF, '-new', '-batch', '-subj', self.subj, '-key', self.key, '-out', self.csr, \
'-days', str(self.i.valid_days), '-passin', 'env:%s' % self.env_pw]
self.exec_openssl(command, env_vars={ self.env_pw: str(self.i.passphrase) })
def generate_der_encoded(self):
"""Generate a DER encoded certificate"""
logger.info( 'Generating DER encoded certificate for %s' % self.i.common_name )
command = 'x509 -in %s -out %s -outform DER' % (self.crt, self.der)
self.exec_openssl(command.split())
def remove_der_encoded(self):
"""Remove a DER encoded certificate"""
if os.path.exists(self.der):
logger.info( 'Removal of DER encoded certificate for %s' % self.i.common_name )
try:
os.remove(self.der)
except OSError, e:
logger.error( "Failed to remove %s" % self.der )
raise Exception( e )
def generate_pkcs12_encoded(self):
"""Generate a PKCS12 encoded certificate.
Passphrase is required as empty passwords not work in batch mode.
"""
command = 'pkcs12 -export -in %s -inkey %s -out %s -passout env:%s' % (self.crt, self.key, self.pkcs12, self.env_pw)
env_vars={ self.env_pw: str(self.i.pkcs12_passphrase), }
if self.i.passphrase:
key_pw = "".join(random.sample(string.letters+string.digits, 10))
command += ' -passin env:%s' % key_pw
env_vars[key_pw] = str(self.i.passphrase)
self.exec_openssl(command.split(), env_vars)
def remove_pkcs12_encoded(self):
"""Remove a PKCS12 encoded certificate if it exists"""
if self.pkcs12 and os.path.exists(self.pkcs12):
logger.info( 'Removal of PKCS12 encoded certificate for %s' % self.i.name )
os.remove(self.pkcs12)
def remove_complete_certificate(self):
"""Remove all files related to the given certificate.
This includes the hash alias, key, csr and the certificate itself.
"""
self.remove_der_encoded()
self.remove_pkcs12_encoded()
hash = "%s/%s.0" % (self.parent_certs, self.get_hash_from_cert())
if os.path.exists(hash):
os.remove(hash)
serial = "%s/%s.pem" % (self.parent_certs, self.get_serial_from_cert())
if os.path.exists(serial):
os.remove(serial)
if os.path.exists(self.csr):
os.remove(self.csr)
if os.path.exists(self.key):
os.remove(self.key)
if os.path.exists(self.crt):
os.remove(self.crt)
def sign_csr(self):
"""Sign the CSR.
Certificate signing and hash creation in CA's certificate directory
"""
env = { self.env_pw: str(self.i.parent_passphrase), "S_A_N": self.i.subjaltname, "C_D_P": self.i.crl_dpoints}
command = 'ca -config %s -name %s -batch -in %s -out %s -days %d -extensions %s -passin env:%s' % \
( PKI_OPENSSL_CONF, self.i.parent.name, self.csr, self.crt, self.i.valid_days, self.i.extension, self.env_pw)
self.exec_openssl(command.split(), env_vars=env)
## Get the just created serial
if self.parent_certs:
serial = self.get_serial_from_cert()
hash = self.get_hash_from_cert()
if os.path.exists('%s/%s.0' % (self.parent_certs, hash)):
os.remove('%s/%s.0' % (self.parent_certs, hash))
os.symlink('%s.pem' % serial, '%s/%s.0' % (self.parent_certs, hash))
def revoke_certificate(self, ppf):
"""Revoke a certificate.
Requires the parents passphrase.
"""
## Check if certificate is already revoked. May have happened during a incomplete transaction
if self.get_revoke_status_from_cert():
logger.info( "Skipping revoke as it already happened" )
return True
command = 'ca -config %s -name %s -batch -revoke %s -passin env:%s' % (PKI_OPENSSL_CONF, self.i.parent.name, self.crt, self.env_pw)
self.exec_openssl(command.split(), env_vars={ self.env_pw: str(ppf) })
def generate_crl(self, ca=None, pf=None):
"""CRL (Certificate Revocation List) generation.
Requires the Certificate Authority and the passphrase.
"""
crl = os.path.join(PKI_DIR, ca, 'crl', '%s.crl.pem' % ca)
command = 'ca -config %s -name %s -gencrl -out %s -crldays 1 -passin env:%s' % (PKI_OPENSSL_CONF, ca, crl, self.env_pw)
self.exec_openssl(command.split(), env_vars={ self.env_pw: str(pf) })
def update_ca_chain_file(self):
"""Build/update the CA chain.
Generates a chain file containing all CA's required to verify the given certificate.
"""
## Build list of parents
chain = []
chain_str = ''
p = self.i.parent
if self.i.parent == None:
chain.append( self.i.name )
else:
chain.append( self.i.name )
while p != None:
chain.append(p.name)
p = p.parent
chain.reverse()
chain_file = os.path.join( PKI_DIR, self.i.name, '%s-chain.cert.pem' % self.i.name )
try:
w = open(chain_file, 'w')
for c in chain:
cert_file = os.path.join( PKI_DIR, c, 'certs', '%s.cert.pem' % c )
command = 'x509 -in %s' % cert_file
output = self.exec_openssl(command.split())
## Get the subject to print it first in the chain file
subj = subject_for_object(self.i)
w.write( '%s\n' % subj )
w.write(output)
w.close()
except:
raise Exception( 'Failed to write chain file!' )
def get_serial_from_cert(self):
"""Extract serial from certificate.
Use openssl to get the serial number from a certificate.
"""
command = 'x509 -in %s -noout -serial' % self.crt
output = self.exec_openssl(command.split())
x = output.rstrip("\n").split('=')
if (len(x[1]) > 2):
sl = re.findall('[a-fA-F0-9]{2}', x[1].lower())
return ':'.join(sl)
return x[1].lower()
def get_hash_from_cert(self):
"""Extract hash from certificate.
Use openssl to get the hash value from a certificate.
"""
command = 'x509 -hash -noout -in %s' % self.crt
output = self.exec_openssl(command.split())
return output.rstrip("\n")
def get_revoke_status_from_cert(self):
"""Get the revoke status from certificate.
Certificate is revoked => True
Certificate is active => False
"""
command = 'crl -text -noout -in %s' % self.crl
output = self.exec_openssl(command.split())
serial_re = re.compile('^\s+Serial\sNumber\:\s+(\w+)')
lines = output.split('\n')
for l in lines:
if serial_re.match(l):
if serial_re.match(l).group(1) == self.i.serial:
logger.info( "The certificate is revoked" )
return True
return False
def dump_certificate(self):
"""Dump a certificate"""
command = "x509 -in %s -noout -text" % self.crt
output = self.exec_openssl(command.split())
return "%s" % output
def rollback(self):
"""Rollback on failed operations"""
pass
| gpl-2.0 |
birdonwheels5/p2pool-apollo | p2pool/util/fixargparse.py | 283 | 1630 | from __future__ import absolute_import
import argparse
import sys
class FixedArgumentParser(argparse.ArgumentParser):
    '''
    fixes argparse's handling of empty string arguments
    and changes @filename behaviour to accept multiple arguments on each line
    '''

    def _read_args_from_files(self, arg_strings):
        # Mirrors argparse.ArgumentParser._read_args_from_files, but keeps
        # empty-string arguments intact instead of treating them as @file refs.
        # expand arguments referencing files
        new_arg_strings = []
        for arg_string in arg_strings:
            # for regular arguments, just add them back into the list
            if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
                new_arg_strings.append(arg_string)
            # replace arguments referencing files with the file content
            else:
                try:
                    args_file = open(arg_string[1:])
                    try:
                        arg_strings = []
                        for arg_line in args_file.read().splitlines():
                            for arg in self.convert_arg_line_to_args(arg_line):
                                arg_strings.append(arg)
                        # recurse so @file references inside files expand too
                        arg_strings = self._read_args_from_files(arg_strings)
                        new_arg_strings.extend(arg_strings)
                    finally:
                        args_file.close()
                except IOError:
                    err = sys.exc_info()[1]
                    self.error(str(err))
        # return the modified argument list
        return new_arg_strings

    def convert_arg_line_to_args(self, arg_line):
        # Split each @file line on whitespace so one line can carry several
        # arguments (stock argparse treats the whole line as one argument).
        return [arg for arg in arg_line.split() if arg.strip()]
| gpl-3.0 |
samehuman/gensim | gensim/test/test_dtm.py | 37 | 1685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automated tests for DTM/DIM model
"""
import logging
import gensim
import os
import sys
import unittest
from gensim import corpora
# needed because sample data files are located in the same folder
module_path = os.path.dirname(__file__)
# build an absolute path to a fixture file inside the test_data directory
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class TestDtmModel(unittest.TestCase):
    """Smoke tests for the DTM/DIM wrapper models."""

    def setUp(self):
        self.time_slices = [3, 7]
        self.corpus = corpora.mmcorpus.MmCorpus(datapath('dtm_test.mm'))
        self.id2word = corpora.Dictionary.load(datapath('dtm_test.dict'))
        # first you need to setup the environment variable $DTM_PATH for the dtm executable file
        self.dtm_path = os.environ.get('DTM_PATH', None)
        if self.dtm_path is not None:
            return
        # skipTest only exists on python >= 2.7; fall back to a warning
        if sys.version_info >= (2, 7, 0):
            self.skipTest("$DTM_PATH is not properly set up.")
        else:
            logging.warning("$DTM_PATH is not properly set up.")

    def testDtm(self):
        if self.dtm_path is None:
            return
        model = gensim.models.wrappers.DtmModel(
            self.dtm_path, self.corpus, self.time_slices, num_topics=2,
            id2word=self.id2word, model='dtm', initialize_lda=True)
        topics = model.show_topics(topics=2, times=2, topn=10)

    def testDim(self):
        if self.dtm_path is None:
            return
        model = gensim.models.wrappers.DtmModel(
            self.dtm_path, self.corpus, self.time_slices, num_topics=2,
            id2word=self.id2word, model='fixed', initialize_lda=True)
        topics = model.show_topics(topics=2, times=2, topn=10)
# Allow running this test module directly with verbose logging enabled.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| gpl-3.0 |
raven47git/python-goose | goose/extractors/videos.py | 15 | 4635 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from goose.extractors import BaseExtractor
from goose.video import Video
# Tags that can host an embedded video player.
VIDEOS_TAGS = ['iframe', 'embed', 'object', 'video']
# Recognized hosting providers, matched as substrings of the embed src.
VIDEO_PROVIDERS = ['youtube', 'vimeo', 'dailymotion', 'kewego']
class VideoExtractor(BaseExtractor):
    """\
    Extracts a list of video from Article top node
    """
    def __init__(self, config, article):
        super(VideoExtractor, self).__init__(config, article)
        # candidate embed nodes collected from the article's top node
        self.candidates = []
        # Video objects extracted so far
        self.movies = []

    def get_embed_code(self, node):
        """Serialized node markup with per-line surrounding whitespace removed."""
        return "".join([line.strip() for line in self.parser.nodeToString(node).splitlines()])

    def get_embed_type(self, node):
        """Tag name of the embed node (iframe/embed/object/video)."""
        return self.parser.getTag(node)

    def get_width(self, node):
        return self.parser.getAttribute(node, 'width')

    def get_height(self, node):
        return self.parser.getAttribute(node, 'height')

    def get_src(self, node):
        return self.parser.getAttribute(node, 'src')

    def get_provider(self, src):
        """Return the first known provider whose name appears in src, else None."""
        if src:
            for provider in VIDEO_PROVIDERS:
                if provider in src:
                    return provider
        return None

    def get_video(self, node):
        """
        Create a video object from a video embed
        """
        video = Video()
        video.embed_code = self.get_embed_code(node)
        video.embed_type = self.get_embed_type(node)
        video.width = self.get_width(node)
        video.height = self.get_height(node)
        video.src = self.get_src(node)
        video.provider = self.get_provider(video.src)
        return video

    def get_iframe_tag(self, node):
        return self.get_video(node)

    def get_video_tag(self, node):
        """extract html video tags"""
        # NOTE(review): <video> tags currently yield an empty Video object
        # (no provider), so get_videos() will drop them.
        return Video()

    def get_embed_tag(self, node):
        # embed node may have an object node as parent
        # in this case we want to retrieve the object node
        # instead of the embed
        parent = self.parser.getParent(node)
        if parent is not None:
            parent_tag = self.parser.getTag(parent)
            if parent_tag == 'object':
                return self.get_object_tag(node)
        return self.get_video(node)

    def get_object_tag(self, node):
        # test if object tag has an embed child
        # in this case we want to remove the embed from
        # the candidate list to avoid parsing it twice
        child_embed_tag = self.parser.getElementsByTag(node, 'embed')
        if child_embed_tag and child_embed_tag[0] in self.candidates:
            self.candidates.remove(child_embed_tag[0])
        # get the object source from its <param name="movie"> child;
        # if we don't have a src node, don't continue
        src_node = self.parser.getElementsByTag(node, tag="param", attr="name", value="movie")
        if not src_node:
            return None
        src = self.parser.getAttribute(src_node[0], "value")
        # check provider
        provider = self.get_provider(src)
        if not provider:
            return None
        video = self.get_video(node)
        video.provider = provider
        video.src = src
        return video

    def get_videos(self):
        # candidate embed nodes under the article's top node
        self.candidates = self.parser.getElementsByTags(self.article.top_node, VIDEOS_TAGS)
        # loop all candidates
        # and check if src attribute belongs to a video provider;
        # dispatch by tag name to the matching get_<tag>_tag method
        for candidate in self.candidates:
            tag = self.parser.getTag(candidate)
            attr = "get_%s_tag" % tag
            if hasattr(self, attr):
                movie = getattr(self, attr)(candidate)
                if movie is not None and movie.provider is not None:
                    self.movies.append(movie)
        # append movies list to article
        self.article.movies = list(self.movies)
| apache-2.0 |
ximion/dak-dep11 | dak/generate_filelist.py | 6 | 9017 | #!/usr/bin/python
"""
Generate file lists for apt-ftparchive.
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Torsten Werner <twerner@debian.org>
@copyright: 2011 Ansgar Burchardt <ansgar@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
# Ganneff> Please go and try to lock mhy now. After than try to lock NEW.
# twerner> !lock mhy
# dak> twerner: You suck, this is already locked by Ganneff
# Ganneff> now try with NEW
# twerner> !lock NEW
# dak> twerner: also locked NEW
# mhy> Ganneff: oy, stop using me for locks and highlighting me you tall muppet
# Ganneff> hehe :)
################################################################################
from daklib.dbconn import *
from daklib.config import Config
from daklib import utils, daklog
from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS, PROC_STATUS_SIGNALRAISED
import apt_pkg, os, stat, sys
from daklib.lists import getSources, getBinaries, getArchAll
def listPath(suite, component, architecture = None, type = None,
        incremental_mode = False):
    """Return a (file, timestamp) pair for the list file of the given
    suite/component (and optionally architecture/type).

    In incremental mode the existing file is opened for appending and its
    mtime is returned; otherwise the file is truncated and timestamp is None.
    """
    if architecture:
        prefix = { 'deb': "binary-",
                   'udeb': "debian-installer_binary-" }[type]
        suffix = prefix + architecture.arch_string
    else:
        suffix = "source"
    filename = "%s_%s_%s.list" % \
        (suite.suite_name, component.component_name, suffix)
    pathname = os.path.join(Config()["Dir::Lists"], filename)
    list_file = utils.open_file(pathname, "a")
    if incremental_mode:
        timestamp = os.fstat(list_file.fileno())[stat.ST_MTIME]
    else:
        list_file.seek(0)
        list_file.truncate()
        timestamp = None
    return (list_file, timestamp)
def writeSourceList(suite_id, component_id, incremental_mode):
    """Write the source package file list for one (suite, component) pair."""
    session = DBConn().session()
    suite = Suite.get(suite_id, session)
    component = Component.get(component_id, session)
    (list_file, timestamp) = listPath(suite, component,
            incremental_mode = incremental_mode)
    message = "sources list for %s %s" % (suite.suite_name, component.component_name)
    list_file.writelines(filename + '\n'
        for _, filename in getSources(suite, component, session, timestamp))
    # read-only work: discard the session state
    session.rollback()
    list_file.close()
    return (PROC_STATUS_SUCCESS, message)
def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
    """Write the arch-all file list for one (suite, component, arch, type)."""
    session = DBConn().session()
    suite = Suite.get(suite_id, session)
    component = Component.get(component_id, session)
    architecture = Architecture.get(architecture_id, session)
    (list_file, timestamp) = listPath(suite, component, architecture, type,
            incremental_mode)
    message = "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
    list_file.writelines(filename + '\n'
        for _, filename in getArchAll(suite, component, architecture, type,
            session, timestamp))
    # read-only work: discard the session state
    session.rollback()
    list_file.close()
    return (PROC_STATUS_SUCCESS, message)
def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
    """Write the binary package file list for one (suite, component, arch, type)."""
    session = DBConn().session()
    suite = Suite.get(suite_id, session)
    component = Component.get(component_id, session)
    architecture = Architecture.get(architecture_id, session)
    (list_file, timestamp) = listPath(suite, component, architecture, type,
            incremental_mode)
    message = "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
    list_file.writelines(filename + '\n'
        for _, filename in getBinaries(suite, component, architecture, type,
            session, timestamp))
    # read-only work: discard the session state
    session.rollback()
    list_file.close()
    return (PROC_STATUS_SUCCESS, message)
def usage():
    # Print the command line help (a Python 2 print statement) and terminate
    # with exit status 0; the help text is user-visible output.
    print """Usage: dak generate_filelist [OPTIONS]
Create filename lists for apt-ftparchive.
  -s, --suite=SUITE            act on this suite
  -c, --component=COMPONENT    act on this component
  -a, --architecture=ARCH      act on this architecture
  -h, --help                   show this help and exit
  -i, --incremental            activate incremental mode
ARCH, COMPONENT and SUITE can be comma (or space) separated list, e.g.
    --suite=testing,unstable
Incremental mode appends only newer files to existing lists."""
    sys.exit()
def main():
    """Generate apt-ftparchive file lists for the selected suites,
    components and architectures, fanning the work out over a process pool."""
    cnf = Config()
    Logger = daklog.Logger('generate-filelist')
    Arguments = [('h', "help",         "Filelist::Options::Help"),
                 ('s', "suite",        "Filelist::Options::Suite", "HasArg"),
                 ('c', "component",    "Filelist::Options::Component", "HasArg"),
                 ('a', "architecture", "Filelist::Options::Architecture", "HasArg"),
                 ('i', "incremental",  "Filelist::Options::Incremental")]
    session = DBConn().session()
    # Default each selector to "everything in the database" unless the
    # configuration already provides a value.
    query_suites = session.query(Suite)
    suites = [suite.suite_name for suite in query_suites]
    if not cnf.has_key('Filelist::Options::Suite'):
        cnf['Filelist::Options::Suite'] = ','.join(suites).encode()
    query_components = session.query(Component)
    components = \
        [component.component_name for component in query_components]
    if not cnf.has_key('Filelist::Options::Component'):
        cnf['Filelist::Options::Component'] = ','.join(components).encode()
    query_architectures = session.query(Architecture)
    architectures = \
        [architecture.arch_string for architecture in query_architectures]
    if not cnf.has_key('Filelist::Options::Architecture'):
        cnf['Filelist::Options::Architecture'] = ','.join(architectures).encode()
    cnf['Filelist::Options::Help'] = ''
    cnf['Filelist::Options::Incremental'] = ''
    apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
    Options = cnf.subtree("Filelist::Options")
    if Options['Help']:
        usage()
    pool = DakProcessPool()
    # Narrow the queries to what was requested on the command line.
    query_suites = query_suites. \
        filter(Suite.suite_name.in_(utils.split_args(Options['Suite'])))
    query_components = query_components. \
        filter(Component.component_name.in_(utils.split_args(Options['Component'])))
    query_architectures = query_architectures. \
        filter(Architecture.arch_string.in_(utils.split_args(Options['Architecture'])))

    def parse_results(message):
        # Split out into (code, msg) as returned by the write* workers
        code, msg = message
        if code == PROC_STATUS_SUCCESS:
            Logger.log([msg])
        elif code == PROC_STATUS_SIGNALRAISED:
            Logger.log(['E: Subprocess recieved signal ', msg])
        else:
            Logger.log(['E: ', msg])

    # One pool task per (suite, component, architecture[, type]) combination.
    for suite in query_suites:
        suite_id = suite.suite_id
        for component in query_components:
            component_id = component.component_id
            for architecture in query_architectures:
                architecture_id = architecture.arch_id
                if architecture not in suite.architectures:
                    # architecture not enabled for this suite: nothing to do
                    pass
                elif architecture.arch_string == 'source':
                    pool.apply_async(writeSourceList,
                        (suite_id, component_id, Options['Incremental']), callback=parse_results)
                elif architecture.arch_string == 'all':
                    pool.apply_async(writeAllList,
                        (suite_id, component_id, architecture_id, 'deb',
                            Options['Incremental']), callback=parse_results)
                    pool.apply_async(writeAllList,
                        (suite_id, component_id, architecture_id, 'udeb',
                            Options['Incremental']), callback=parse_results)
                else: # arch any
                    pool.apply_async(writeBinaryList,
                        (suite_id, component_id, architecture_id, 'deb',
                            Options['Incremental']), callback=parse_results)
                    pool.apply_async(writeBinaryList,
                        (suite_id, component_id, architecture_id, 'udeb',
                            Options['Incremental']), callback=parse_results)
    pool.close()
    pool.join()
    # this script doesn't change the database
    session.close()
    Logger.close()
    sys.exit(pool.overall_status())


if __name__ == '__main__':
    main()
| gpl-2.0 |
Matt-Deacalion/django | django/db/backends/mysql/creation.py | 59 | 2124 | import subprocess
import sys
from django.db.backends.base.creation import BaseDatabaseCreation
from .client import DatabaseClient
class DatabaseCreation(BaseDatabaseCreation):
    """MySQL-specific test-database creation behavior."""

    def sql_table_creation_suffix(self):
        """Return the suffix appended to CREATE DATABASE for the test DB.

        Builds 'CHARACTER SET <x>' and/or 'COLLATE <y>' from the TEST
        settings dict; returns an empty string when neither is set.
        """
        suffix = []
        test_settings = self.connection.settings_dict['TEST']
        if test_settings['CHARSET']:
            suffix.append('CHARACTER SET %s' % test_settings['CHARSET'])
        if test_settings['COLLATION']:
            suffix.append('COLLATE %s' % test_settings['COLLATION'])
        return ' '.join(suffix)

    def _clone_test_db(self, number, verbosity, keepdb=False):
        """Create a clone of the test database for parallel test run *number*.

        Creates the target database, then copies schema and data by piping
        `mysqldump` of the source database into a mysql client pointed at
        the target.
        """
        qn = self.connection.ops.quote_name
        source_database_name = self.connection.settings_dict['NAME']
        target_database_name = self.get_test_db_clone_settings(number)['NAME']
        with self._nodb_connection.cursor() as cursor:
            try:
                cursor.execute("CREATE DATABASE %s" % qn(target_database_name))
            except Exception as e:
                # NOTE(review): the exception is not inspected — presumably
                # CREATE failed because the database already exists. With
                # keepdb, reuse it as-is; otherwise drop and recreate.
                # (This `e` is unused and shadowed by the inner handler.)
                if keepdb:
                    return
                try:
                    if verbosity >= 1:
                        print("Destroying old test database '%s'..." % self.connection.alias)
                    cursor.execute("DROP DATABASE %s" % qn(target_database_name))
                    cursor.execute("CREATE DATABASE %s" % qn(target_database_name))
                except Exception as e:
                    sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
        # settings_to_cmd_args builds a mysql-client argv whose last element
        # is the database name; rewrite argv[0]/argv[-1] for dump and load.
        dump_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        dump_cmd[0] = 'mysqldump'
        dump_cmd[-1] = source_database_name
        load_cmd = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
        load_cmd[-1] = target_database_name
        dump_proc = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE)
        load_proc = subprocess.Popen(load_cmd, stdin=dump_proc.stdout, stdout=subprocess.PIPE)
        dump_proc.stdout.close()  # allow dump_proc to receive a SIGPIPE if load_proc exits.
        # NOTE(review): exit statuses of both processes are ignored; a failed
        # dump/load is silent — confirm this is intentional.
        load_proc.communicate()
| bsd-3-clause |
emc-openstack/storops | storops/vnx/xmlapi_parser.py | 1 | 8578 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
import re
import six
from xml import etree
__author__ = 'Jay Xu'
log = logging.getLogger(__name__)
class XMLAPIParser(object):
    """Event-driven parser that flattens VNX XML API responses into dicts.

    ``parse`` walks the document with ElementTree.iterparse and dispatches
    each start/end event to a handler method derived from the tag name
    (e.g. <TaskResponse> -> ``start_task_response``). Handlers accumulate
    parsed objects and problems into a flat result dict.
    """

    def __init__(self):
        # The following Boolean acts as the flag for the common sub-element.
        # For instance:
        # <CifsServers>
        #     <li> server_1 </li>
        # </CifsServers>
        # <Alias>
        #     <li> interface_1 </li>
        # </Alias>
        self.tag = None    # tag (namespace stripped) of the current element
        self.elt = {}      # dict for the object/problem currently being built
        self.stack = []    # ancestor elements of the element being processed

    @staticmethod
    def _delete_ns(tag):
        """Strip the '{namespace}' prefix ElementTree prepends to tag names."""
        i = tag.find('}')
        if i >= 0:
            tag = tag[i + 1:]
        return tag

    def parse(self, xml):
        """Parse an XML response string and return the flattened result dict."""
        result = {
            'type': None,
            'taskId': None,
            'maxSeverity': None,
            'objects': [],
            'problems': [],
        }

        events = ("start", "end")
        context = etree.ElementTree.iterparse(six.BytesIO(xml.encode('utf-8')),
                                              events=events)
        for action, elem in context:
            self.tag = self._delete_ns(elem.tag)
            func = self._get_func(action, self.tag)
            self.track_stack(action, elem)
            if func in vars(XMLAPIParser):
                # getattr-based dispatch replaces the original
                # eval('self.' + func) lookup; the original also ran the
                # exact same call in both its 'start' and 'end' branches,
                # so a single unconditional call is behavior-preserving.
                getattr(self, func)(elem, result)
        return result

    def track_stack(self, action, elem):
        # Maintain the ancestor chain so handlers can inspect their parents.
        if action == 'start':
            self.stack.append(elem)
        elif action == 'end':
            self.stack.pop()

    @staticmethod
    def _get_func(action, tag):
        """Map (action, TagName) to a handler name like 'start_tag_name'."""
        if tag == 'W2KServerData':
            return action + '_' + 'w2k_server_data'
        # Split CamelCase into words, join with '_' and lowercase.
        temp_list = re.sub(r"([A-Z])", r" \1", tag).split()
        if temp_list:
            func_name = action + '_' + '_'.join(temp_list)
        else:
            func_name = action + '_' + tag
        return func_name.lower()

    @staticmethod
    def _copy_property(source, target):
        for key in source:
            target[key] = source[key]

    @classmethod
    def _append_elm_property(cls, elm, result, identifier):
        # Merge elm's attributes into the already-parsed object that carries
        # the same identifier value.
        for obj in result['objects']:
            if cls.has_identifier(obj, elm, identifier):
                for key, value in elm.attrib.items():
                    obj[key] = value

    @staticmethod
    def has_identifier(obj, elm, identifier):
        return (identifier in obj and
                identifier in elm.attrib and
                elm.attrib[identifier] == obj[identifier])

    def _append_element(self, elm, result, identifier):
        # Attach elm (as a dict) to a per-tag list on the matching object.
        sub_elm = {}
        self._copy_property(elm.attrib, sub_elm)

        for obj in result['objects']:
            if self.has_identifier(obj, elm, identifier):
                if self.tag in obj:
                    obj[self.tag].append(sub_elm)
                else:
                    obj[self.tag] = [sub_elm]

    def start_task_response(self, elm, result):
        result['type'] = 'TaskResponse'
        self._copy_property(elm.attrib, result)

    @staticmethod
    def start_fault(_, result):
        result['type'] = 'Fault'

    def _parent_tag(self):
        """Return the namespace-stripped tag of the parent element, or None."""
        if len(self.stack) >= 2:
            parent = self.stack[-2]
            ret = self._delete_ns(parent.tag)
        else:
            ret = None
        return ret

    def start_status(self, elm, result):
        # A <Status> element's meaning depends on its parent node.
        parent_tag = self._parent_tag()
        if parent_tag == 'TaskResponse':
            result['maxSeverity'] = elm.attrib['maxSeverity']
        elif parent_tag in ['MoverStatus', 'Vdm', 'MoverHost']:
            self.elt['maxSeverity'] = elm.attrib['maxSeverity']

    def start_query_status(self, elm, result):
        result['type'] = 'QueryStatus'
        self._copy_property(elm.attrib, result)

    def start_problem(self, elm, result):
        self.elt = {}
        self._copy_property(elm.attrib, self.elt)
        result['problems'].append(self.elt)

    def start_description(self, elm, _):
        self.elt['Description'] = elm.text

    def start_action(self, elm, _):
        self.elt['Action'] = elm.text

    def start_diagnostics(self, elm, _):
        self.elt['Diagnostics'] = elm.text

    def start_file_system(self, elm, result):
        self._as_object(elm, result)

    def start_file_system_capacity_info(self, elm, result):
        identifier = 'fileSystem'
        self._append_elm_property(elm, result, identifier)

    def start_storage_pool(self, elm, result):
        self._as_object(elm, result)

    def start_system_storage_pool_data(self, elm, _):
        self._copy_property(elm.attrib, self.elt)

    def start_mover(self, elm, result):
        self._as_object(elm, result)

    def start_mover_host(self, elm, result):
        self._as_object(elm, result)

    def start_nfs_export(self, elm, result):
        self._as_object(elm, result)

    def _as_object(self, elm, result):
        # Begin a new top-level object seeded from elm's attributes.
        self.elt = {}
        self._copy_property(elm.attrib, self.elt)
        result['objects'].append(self.elt)

    def start_mover_status(self, elm, result):
        identifier = 'mover'
        self._append_elm_property(elm, result, identifier)

    def start_mover_route(self, elm, result):
        self._append_element(elm, result, 'mover')

    def start_mover_deduplication_settings(self, elm, result):
        self._append_element(elm, result, 'mover')

    def start_mover_dns_domain(self, elm, result):
        self._append_element(elm, result, 'mover')

    def start_mover_interface(self, elm, result):
        self._append_element(elm, result, 'mover')

    def start_logical_network_device(self, elm, result):
        self._append_element(elm, result, 'mover')

    def start_vdm(self, elm, result):
        self._as_object(elm, result)

    def _add_element(self, name, item):
        if name not in self.elt:
            self.elt[name] = []
        self.elt[name].append(item)

    def start_li(self, elm, _):
        # <li> entries are generic; route the text by the enclosing node.
        parent_tag = self._parent_tag()
        host_nodes = ('AccessHosts', 'RwHosts', 'RoHosts', 'RootHosts')
        if parent_tag == 'CifsServers':
            self._add_element('CifsServers', elm.text)
        elif parent_tag == 'Aliases':
            self._add_element('Aliases', elm.text)
        elif parent_tag == 'Interfaces':
            self._add_element('Interfaces', elm.text)
        elif parent_tag in host_nodes:
            if parent_tag not in self.elt:
                self.elt[parent_tag] = []
            self.elt[parent_tag].append(elm.text)

    def start_cifs_server(self, elm, result):
        self._as_object(elm, result)

    def start_w2k_server_data(self, elm, _):
        self._copy_property(elm.attrib, self.elt)

    def start_cifs_share(self, elm, result):
        self._as_object(elm, result)

    def start_checkpoint(self, elm, result):
        self._as_object(elm, result)

    def start_ro_file_system_hosts(self, elm, _):
        self._copy_property(elm.attrib, self.elt)

    def start_standalone_server_data(self, elm, _):
        self._copy_property(elm.attrib, self.elt)

    def start_fibre_channel_device_data(self, elm, _):
        self._copy_attrib_to_parent(elm)

    def start_network_device_data(self, elm, _):
        self._copy_attrib_to_parent(elm)

    def _copy_attrib_to_parent(self, elm):
        # Lift device attributes onto the parent XML element so they are
        # collected when the parent itself is handled.
        if len(self.stack) >= 2:
            parent = self.stack[-2]
            for k, v in elm.attrib.items():
                parent.attrib[k] = v

    def start_mover_motherboard(self, elm, result):
        self._append_element(elm, result, 'moverHost')

    def end_physical_device(self, elm, result):
        self._append_element(elm, result, 'moverHost')

    def start_fc_descriptor(self, elm, result):
        self._append_element(elm, result, 'moverHost')

    def start_mount(self, elm, result):
        self._as_object(elm, result)
| apache-2.0 |
manjunaths/tensorflow | tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py | 16 | 14994 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the TFExampleDecoder its associated helper classes.
The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos.
In order to do so each requested item must be paired with one or more Example
features that are parsed to produce the Tensor-based manifestation of the item.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.slim.python.slim.data import data_decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
class ItemHandler(object):
  """Specifies the item-to-Features mapping for tf.parse_example.

  An ItemHandler both specifies a list of Features used for parsing an Example
  proto as well as a function that post-processes the results of Example
  parsing.
  """

  __metaclass__ = abc.ABCMeta

  def __init__(self, keys):
    """Constructs the handler with the name of the tf.Feature keys to use.

    See third_party/tensorflow/core/example/feature.proto

    Args:
      keys: the name of the TensorFlow Example Feature.
    """
    # A lone key is wrapped in a list; tuples and lists pass through as-is.
    self._keys = keys if isinstance(keys, (tuple, list)) else [keys]

  @property
  def keys(self):
    return self._keys

  @abc.abstractmethod
  def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to the requested item.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      the final tensor representing the item being handled.
    """
    pass
class ItemHandlerCallback(ItemHandler):
  """An ItemHandler that converts the parsed tensors via a given function.

  Unlike other ItemHandlers, the ItemHandlerCallback resolves its item via
  a callback function rather than using prespecified behavior.
  """

  def __init__(self, keys, func):
    """Initializes the ItemHandler.

    Args:
      keys: a list of TF-Example keys.
      func: a function that takes as an argument a dictionary from `keys` to
        parsed Tensors.
    """
    super(ItemHandlerCallback, self).__init__(keys)
    self._func = func

  def tensors_to_item(self, keys_to_tensors):
    # Delegate entirely to the user-supplied callback.
    return self._func(keys_to_tensors)
class BoundingBox(ItemHandler):
  """An ItemHandler that concatenates a set of parsed Tensors to Bounding Boxes.
  """

  def __init__(self, keys=None, prefix=None):
    """Initialize the bounding box handler.

    Args:
      keys: A list of four key names representing the ymin, xmin, ymax, xmax
      prefix: An optional prefix for each of the bounding box keys.
        If provided, `prefix` is prepended to each key in `keys`.

    Raises:
      ValueError: if keys is not `None` and also not a list of exactly 4 keys
    """
    if keys is None:
      keys = ['ymin', 'xmin', 'ymax', 'xmax']
    elif len(keys) != 4:
      raise ValueError('BoundingBox expects 4 keys but got {}'.format(
          len(keys)))
    self._prefix = prefix
    self._keys = keys
    # Treat a missing prefix as the empty string. The original code computed
    # `prefix + k`, which raised TypeError whenever `prefix` was left at its
    # default of None.
    self._full_keys = [(prefix or '') + k for k in keys]
    super(BoundingBox, self).__init__(self._full_keys)

  def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [num_boxes, 4] tensor of bounding box coordinates,
      i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
    """
    sides = []
    for key in self._full_keys:
      # Each per-side feature holds one coordinate per box; give it a
      # leading axis so the four sides can be stacked.
      side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
      sides.append(side)

    bounding_box = array_ops.concat(sides, 0)
    return array_ops.transpose(bounding_box)
class Tensor(ItemHandler):
  """An ItemHandler that returns a parsed Tensor."""

  def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0):
    """Initializes the Tensor handler.

    Tensors are, by default, returned without any reshaping. However, there are
    two mechanisms which allow reshaping to occur at load time. If `shape_keys`
    is provided, both the `Tensor` corresponding to `tensor_key` and
    `shape_keys` is loaded and the former `Tensor` is reshaped with the values
    of the latter. Alternatively, if a fixed `shape` is provided, the `Tensor`
    corresponding to `tensor_key` is loaded and reshape appropriately.
    If neither `shape_keys` nor `shape` are provided, the `Tensor` will be
    returned without any reshaping.

    Args:
      tensor_key: the name of the `TFExample` feature to read the tensor from.
      shape_keys: Optional name or list of names of the TF-Example feature in
        which the tensor shape is stored. If a list, then each corresponds to
        one dimension of the shape.
      shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
        reshaped accordingly.
      default_value: The value used when the `tensor_key` is not found in a
        particular `TFExample`.

    Raises:
      ValueError: if both `shape_keys` and `shape` are specified.
    """
    if shape_keys and shape is not None:
      raise ValueError('Cannot specify both shape_keys and shape parameters.')
    if shape_keys and not isinstance(shape_keys, list):
      shape_keys = [shape_keys]
    self._tensor_key = tensor_key
    self._shape_keys = shape_keys
    self._shape = shape
    self._default_value = default_value
    # Parse the shape features alongside the tensor feature itself.
    keys = [tensor_key]
    if shape_keys:
      keys.extend(shape_keys)
    super(Tensor, self).__init__(keys)

  def tensors_to_item(self, keys_to_tensors):
    tensor = keys_to_tensors[self._tensor_key]
    shape = self._shape
    if self._shape_keys:
      # Assemble the runtime shape from the shape features, one entry per
      # dimension, densifying any sparse pieces first.
      shape_dims = []
      for k in self._shape_keys:
        shape_dim = keys_to_tensors[k]
        if isinstance(shape_dim, sparse_tensor.SparseTensor):
          shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
        shape_dims.append(shape_dim)
      shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
    if isinstance(tensor, sparse_tensor.SparseTensor):
      # Sparse input: reshape (if requested) and densify, filling missing
      # indices with the configured default value.
      if shape is not None:
        tensor = sparse_ops.sparse_reshape(tensor, shape)
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    else:
      if shape is not None:
        tensor = array_ops.reshape(tensor, shape)
    return tensor
class SparseTensor(ItemHandler):
  """An ItemHandler for SparseTensors."""

  def __init__(self,
               indices_key=None,
               values_key=None,
               shape_key=None,
               shape=None,
               densify=False,
               default_value=0):
    """Initializes the Tensor handler.

    Args:
      indices_key: the name of the TF-Example feature that contains the ids.
        Defaults to 'indices'.
      values_key: the name of the TF-Example feature that contains the values.
        Defaults to 'values'.
      shape_key: the name of the TF-Example feature that contains the shape.
        If provided it would be used.
      shape: the output shape of the SparseTensor. If `shape_key` is not
        provided this `shape` would be used.
      densify: whether to convert the SparseTensor into a dense Tensor.
      default_value: Scalar value to set when making dense for indices not
        specified in the `SparseTensor`.
    """
    indices_key = indices_key or 'indices'
    values_key = values_key or 'values'
    self._indices_key = indices_key
    self._values_key = values_key
    self._shape_key = shape_key
    self._shape = shape
    self._densify = densify
    self._default_value = default_value
    keys = [indices_key, values_key]
    if shape_key:
      keys.append(shape_key)
    super(SparseTensor, self).__init__(keys)

  def tensors_to_item(self, keys_to_tensors):
    indices = keys_to_tensors[self._indices_key]
    values = keys_to_tensors[self._values_key]
    # Shape precedence: explicit shape feature > fixed `shape` argument >
    # the dense shape of the parsed indices tensor.
    if self._shape_key:
      shape = keys_to_tensors[self._shape_key]
      if isinstance(shape, sparse_tensor.SparseTensor):
        shape = sparse_ops.sparse_tensor_to_dense(shape)
    elif self._shape:
      shape = self._shape
    else:
      shape = indices.dense_shape
    indices_shape = array_ops.shape(indices.indices)
    rank = indices_shape[1]
    ids = math_ops.to_int64(indices.values)
    # Replace the last index column with the parsed id values, keeping the
    # leading columns of the original indices.
    indices_columns_to_preserve = array_ops.slice(
        indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
    if self._densify:
      tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
    return tensor
class Image(ItemHandler):
  """An ItemHandler that decodes a parsed Tensor as an image."""

  def __init__(self, image_key=None, format_key=None, shape=None, channels=3):
    """Initializes the image.

    Args:
      image_key: the name of the TF-Example feature in which the encoded image
        is stored.
      format_key: the name of the TF-Example feature in which the image format
        is stored.
      shape: the output shape of the image as 1-D `Tensor`
        [height, width, channels]. If provided, the image is reshaped
        accordingly. If left as None, no reshaping is done. A shape should
        be supplied only if all the stored images have the same shape.
      channels: the number of channels in the image.
    """
    if not image_key:
      image_key = 'image/encoded'
    if not format_key:
      format_key = 'image/format'

    super(Image, self).__init__([image_key, format_key])
    self._image_key = image_key
    self._format_key = format_key
    self._shape = shape
    self._channels = channels

  def tensors_to_item(self, keys_to_tensors):
    """See base class."""
    image_buffer = keys_to_tensors[self._image_key]
    image_format = keys_to_tensors[self._format_key]

    return self._decode(image_buffer, image_format)

  def _decode(self, image_buffer, image_format):
    """Decodes the image buffer.

    Args:
      image_buffer: The tensor representing the encoded image tensor.
      image_format: The image format for the image in `image_buffer`.

    Returns:
      A tensor that represents decoded image of self._shape, or
      (?, ?, self._channels) if self._shape is not specified.
    """
    # Per-format decoder thunks; control_flow_ops.case picks one at runtime
    # based on the (string tensor) image_format.
    def decode_png():
      return image_ops.decode_png(image_buffer, self._channels)

    def decode_raw():
      return parsing_ops.decode_raw(image_buffer, dtypes.uint8)

    def decode_jpg():
      return image_ops.decode_jpeg(image_buffer, self._channels)

    # For RGBA images JPEG is not a valid decoder option.
    if self._channels > 3:
      pred_fn_pairs = {
          math_ops.logical_or(
              math_ops.equal(image_format, 'raw'),
              math_ops.equal(image_format, 'RAW')): decode_raw,
      }
      default_decoder = decode_png
    else:
      pred_fn_pairs = {
          math_ops.logical_or(
              math_ops.equal(image_format, 'png'),
              math_ops.equal(image_format, 'PNG')): decode_png,
          math_ops.logical_or(
              math_ops.equal(image_format, 'raw'),
              math_ops.equal(image_format, 'RAW')): decode_raw,
      }
      default_decoder = decode_jpg

    image = control_flow_ops.case(
        pred_fn_pairs, default=default_decoder, exclusive=True)

    # Height/width are unknown statically; only the channel count is fixed.
    image.set_shape([None, None, self._channels])
    if self._shape is not None:
      image = array_ops.reshape(image, self._shape)

    return image
class TFExampleDecoder(data_decoder.DataDecoder):
  """A decoder for TensorFlow Examples.

  Decoding Example proto buffers is comprised of two stages: (1) Example
  parsing and (2) tensor manipulation.

  In the first stage, the tf.parse_example function is called with a list of
  FixedLenFeatures and SparseLenFeatures. These instances tell TF how to parse
  the example. The output of this stage is a set of tensors.

  In the second stage, the resulting tensors are manipulated to provide the
  requested 'item' tensors.

  To perform this decoding operation, an ExampleDecoder is given a list of
  ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
  contains the instructions for post_processing its tensors for stage 2.
  """

  def __init__(self, keys_to_features, items_to_handlers):
    """Constructs the decoder.

    Args:
      keys_to_features: a dictionary from TF-Example keys to either
        tf.VarLenFeature or tf.FixedLenFeature instances. See tensorflow's
        parsing_ops.py.
      items_to_handlers: a dictionary from items (strings) to ItemHandler
        instances. Note that the ItemHandler's are provided the keys that they
        use to return the final item Tensors.
    """
    self._keys_to_features = keys_to_features
    self._items_to_handlers = items_to_handlers

  def list_items(self):
    """See base class."""
    return self._items_to_handlers.keys()

  def decode(self, serialized_example, items=None):
    """Decodes the given serialized TF-example.

    Args:
      serialized_example: a serialized TF-example tensor.
      items: the list of items to decode. These must be a subset of the item
        keys in self._items_to_handlers. If `items` is left as None, then all
        of the items in self._items_to_handlers are decoded.

    Returns:
      the decoded items, a list of tensor.
    """
    example = parsing_ops.parse_single_example(serialized_example,
                                               self._keys_to_features)

    # Reshape non-sparse elements just once:
    for k in self._keys_to_features:
      v = self._keys_to_features[k]
      if isinstance(v, parsing_ops.FixedLenFeature):
        example[k] = array_ops.reshape(example[k], v.shape)

    if not items:
      items = self._items_to_handlers.keys()

    outputs = []
    for item in items:
      handler = self._items_to_handlers[item]
      # Give each handler only the parsed tensors it declared it needs.
      keys_to_tensors = {key: example[key] for key in handler.keys}
      outputs.append(handler.tensors_to_item(keys_to_tensors))
    return outputs
| apache-2.0 |
Reflexe/doc_to_pdf | Windows/program/python-core-3.5.0/lib/encodings/cp1257.py | 272 | 13374 | """ Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1257 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_table)
        return encoded, consumed

    def decode(self, input, errors='strict'):
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return decoded, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1257 encoder; the charmap codec carries no state."""

    def encode(self, input, final=False):
        encoded = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1257 decoder; the charmap codec carries no state."""

    def decode(self, input, final=False):
        decoded = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits encode() from Codec and the stream plumbing
    # from codecs.StreamWriter; no extra behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits decode() from Codec and the stream plumbing
    # from codecs.StreamReader; no extra behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo used to register this codec as 'cp1257'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1257',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\xa8' # 0x8D -> DIAERESIS
'\u02c7' # 0x8E -> CARON
'\xb8' # 0x8F -> CEDILLA
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\xaf' # 0x9D -> MACRON
'\u02db' # 0x9E -> OGONEK
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe' # 0xA1 -> UNDEFINED
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe' # 0xA5 -> UNDEFINED
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xe6' # 0xBF -> LATIN SMALL LETTER AE
'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Build the inverse (character -> byte) map from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| mpl-2.0 |
beiko-lab/gengis | bin/Lib/test/test_hashlib.py | 9 | 13976 | # Test hashlib module
#
# $Id$
#
# Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
import array
import hashlib
import itertools
import sys
try:
import threading
except ImportError:
threading = None
import unittest
import warnings
from test import test_support
from test.test_support import _4G, precisionbigmemtest
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
def hexstr(s):
    """Return the lowercase hexadecimal encoding of every character in *s*.

    Uses string.hexdigits (indices 0-15 map to '0'-'9', 'a'-'f') as the
    lookup table, one high nibble and one low nibble per character.
    """
    import string
    digits = string.hexdigits
    return ''.join(digits[(ord(ch) >> 4) & 0xF] + digits[ord(ch) & 0xF]
                   for ch in s)
class HashLibTestCase(unittest.TestCase):
    """Exercise hashlib: every supported algorithm is run through all of its
    available constructors (hashlib.<name>, hashlib.new, and the optional C
    extension modules) against published test vectors, large inputs, and
    concurrent updates.  (Python 2 test module.)
    """

    # Names accepted by hashlib.new(); upper-case aliases are included and
    # must resolve to the same algorithms as their lower-case forms.
    supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
                             'sha224', 'SHA224', 'sha256', 'SHA256',
                             'sha384', 'SHA384', 'sha512', 'SHA512' )

    # On a --with-pydebug build, a C extension failing to import is likely
    # a build problem worth surfacing as a warning.
    _warn_on_extension_import = COMPILED_WITH_PYDEBUG

    def _conditional_import_module(self, module_name):
        """Import a module and return a reference to it or None on failure."""
        try:
            exec('import '+module_name)
        except ImportError, error:
            if self._warn_on_extension_import:
                warnings.warn('Did a C extension fail to compile? %s' % error)
        # The exec above bound the module (if any) into this frame's locals.
        return locals().get(module_name)

    def __init__(self, *args, **kwargs):
        # Build self.constructors_to_test: algorithm name -> set of callables
        # taking zero or one data argument and returning a hash object.
        algorithms = set()
        for algorithm in self.supported_hash_names:
            algorithms.add(algorithm.lower())
        self.constructors_to_test = {}
        for algorithm in algorithms:
            self.constructors_to_test[algorithm] = set()
        # For each algorithm, test the direct constructor and the use
        # of hashlib.new given the algorithm name.
        for algorithm, constructors in self.constructors_to_test.items():
            constructors.add(getattr(hashlib, algorithm))
            # _alg is bound as a default to avoid the late-binding-closure
            # trap: each wrapper must remember its own algorithm name.
            def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
                if data is None:
                    return hashlib.new(_alg)
                return hashlib.new(_alg, data)
            constructors.add(_test_algorithm_via_hashlib_new)
        # Also register the raw OpenSSL-backed and pure-extension
        # constructors when the corresponding modules are importable.
        _hashlib = self._conditional_import_module('_hashlib')
        if _hashlib:
            # These two algorithms should always be present when this module
            # is compiled. If not, something was compiled wrong.
            assert hasattr(_hashlib, 'openssl_md5')
            assert hasattr(_hashlib, 'openssl_sha1')
            for algorithm, constructors in self.constructors_to_test.items():
                constructor = getattr(_hashlib, 'openssl_'+algorithm, None)
                if constructor:
                    constructors.add(constructor)
        _md5 = self._conditional_import_module('_md5')
        if _md5:
            self.constructors_to_test['md5'].add(_md5.new)
        _sha = self._conditional_import_module('_sha')
        if _sha:
            self.constructors_to_test['sha1'].add(_sha.new)
        _sha256 = self._conditional_import_module('_sha256')
        if _sha256:
            self.constructors_to_test['sha224'].add(_sha256.sha224)
            self.constructors_to_test['sha256'].add(_sha256.sha256)
        _sha512 = self._conditional_import_module('_sha512')
        if _sha512:
            self.constructors_to_test['sha384'].add(_sha512.sha384)
            self.constructors_to_test['sha512'].add(_sha512.sha512)
        super(HashLibTestCase, self).__init__(*args, **kwargs)

    def test_hash_array(self):
        """Hash constructors must accept buffer-API objects (array.array)."""
        a = array.array("b", range(10))
        constructors = self.constructors_to_test.itervalues()
        for cons in itertools.chain.from_iterable(constructors):
            c = cons(a)
            c.hexdigest()

    def test_algorithms_attribute(self):
        """hashlib.algorithms lists exactly the lower-case supported names."""
        self.assertEqual(hashlib.algorithms,
            tuple([_algo for _algo in self.supported_hash_names if
                   _algo.islower()]))

    def test_unknown_hash(self):
        """hashlib.new must raise ValueError for an unknown algorithm name."""
        try:
            hashlib.new('spam spam spam spam spam')
        except ValueError:
            pass
        else:
            # Deliberately-false assertion; the string doubles as the
            # failure message shown when ValueError was not raised.
            self.assertTrue(0 == "hashlib didn't reject bogus hash name")

    def test_get_builtin_constructor(self):
        """__get_builtin_constructor raises ValueError for unknown names and
        when the backing extension module cannot be imported."""
        get_builtin_constructor = hashlib.__dict__[
                '__get_builtin_constructor']
        self.assertRaises(ValueError, get_builtin_constructor, 'test')
        try:
            import _md5
        except ImportError:
            pass
        # This forces an ImportError for "import _md5" statements
        sys.modules['_md5'] = None
        try:
            self.assertRaises(ValueError, get_builtin_constructor, 'md5')
        finally:
            # Restore the real module (or remove our None placeholder).
            if '_md5' in locals():
                sys.modules['_md5'] = _md5
            else:
                del sys.modules['_md5']

    def test_hexdigest(self):
        """hexdigest() must equal the hex encoding of digest()."""
        for name in self.supported_hash_names:
            h = hashlib.new(name)
            self.assertTrue(hexstr(h.digest()) == h.hexdigest())

    def test_large_update(self):
        """Incremental update() calls must match one-shot hashing."""
        aas = 'a' * 128
        bees = 'b' * 127
        cees = 'c' * 126
        abcs = aas + bees + cees
        for name in self.supported_hash_names:
            m1 = hashlib.new(name)
            m1.update(aas)
            m1.update(bees)
            m1.update(cees)
            m2 = hashlib.new(name)
            m2.update(abcs)
            self.assertEqual(m1.digest(), m2.digest(), name+' update problem.')
            m3 = hashlib.new(name, abcs)
            self.assertEqual(m1.digest(), m3.digest(), name+' new problem.')

    def check(self, name, data, digest):
        """Assert every registered constructor for *name* hashes *data* to
        the expected hex *digest*."""
        constructors = self.constructors_to_test[name]
        # 2 is for hashlib.name(...) and hashlib.new(name, ...)
        self.assertGreaterEqual(len(constructors), 2)
        for hash_object_constructor in constructors:
            computed = hash_object_constructor(data).hexdigest()
            self.assertEqual(
                    computed, digest,
                    "Hash algorithm %s constructed using %s returned hexdigest"
                    " %r for %d byte input data that should have hashed to %r."
                    % (name, hash_object_constructor,
                       computed, len(data), digest))

    def check_unicode(self, algorithm_name):
        # Unicode objects are not allowed as input.
        expected = hashlib.new(algorithm_name, str(u'spam')).hexdigest()
        self.check(algorithm_name, u'spam', expected)

    def test_unicode(self):
        # In python 2.x unicode is auto-encoded to the system default encoding
        # when passed to hashlib functions.
        self.check_unicode('md5')
        self.check_unicode('sha1')
        self.check_unicode('sha224')
        self.check_unicode('sha256')
        self.check_unicode('sha384')
        self.check_unicode('sha512')

    # MD5 test vectors (RFC 1321 appendix A.5).
    def test_case_md5_0(self):
        self.check('md5', '', 'd41d8cd98f00b204e9800998ecf8427e')

    def test_case_md5_1(self):
        self.check('md5', 'abc', '900150983cd24fb0d6963f7d28e17f72')

    def test_case_md5_2(self):
        self.check('md5', 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
                   'd174ab98d277d9f5a5611c2c9f419d9f')

    # Inputs longer than 4GB exercise the 64-bit length handling; the
    # OverflowError branch covers 32-bit builds that cannot index that far.
    @precisionbigmemtest(size=_4G + 5, memuse=1)
    def test_case_md5_huge(self, size):
        if size == _4G + 5:
            try:
                self.check('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
            except OverflowError:
                pass # 32-bit arch

    @precisionbigmemtest(size=_4G - 1, memuse=1)
    def test_case_md5_uintmax(self, size):
        if size == _4G - 1:
            try:
                self.check('md5', 'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
            except OverflowError:
                pass # 32-bit arch

    # use the three examples from Federal Information Processing Standards
    # Publication 180-1, Secure Hash Standard, 1995 April 17
    # http://www.itl.nist.gov/div897/pubs/fip180-1.htm
    def test_case_sha1_0(self):
        self.check('sha1', "",
                   "da39a3ee5e6b4b0d3255bfef95601890afd80709")

    def test_case_sha1_1(self):
        self.check('sha1', "abc",
                   "a9993e364706816aba3e25717850c26c9cd0d89d")

    def test_case_sha1_2(self):
        self.check('sha1', "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "84983e441c3bd26ebaae4aa1f95129e5e54670f1")

    def test_case_sha1_3(self):
        self.check('sha1', "a" * 1000000,
                   "34aa973cd4c4daa4f61eeb2bdbad27316534016f")

    # use the examples from Federal Information Processing Standards
    # Publication 180-2, Secure Hash Standard, 2002 August 1
    # http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
    def test_case_sha224_0(self):
        self.check('sha224', "",
                   "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")

    def test_case_sha224_1(self):
        self.check('sha224', "abc",
                   "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")

    def test_case_sha224_2(self):
        self.check('sha224',
                   "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")

    def test_case_sha224_3(self):
        self.check('sha224', "a" * 1000000,
                   "20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")

    def test_case_sha256_0(self):
        self.check('sha256', "",
                   "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

    def test_case_sha256_1(self):
        self.check('sha256', "abc",
                   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")

    def test_case_sha256_2(self):
        self.check('sha256',
                   "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")

    def test_case_sha256_3(self):
        self.check('sha256', "a" * 1000000,
                   "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")

    def test_case_sha384_0(self):
        self.check('sha384', "",
                   "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
                   "274edebfe76f65fbd51ad2f14898b95b")

    def test_case_sha384_1(self):
        self.check('sha384', "abc",
                   "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
                   "8086072ba1e7cc2358baeca134c825a7")

    def test_case_sha384_2(self):
        self.check('sha384',
                   "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
                   "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
                   "fcc7c71a557e2db966c3e9fa91746039")

    def test_case_sha384_3(self):
        self.check('sha384', "a" * 1000000,
                   "9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
                   "07b8b3dc38ecc4ebae97ddd87f3d8985")

    def test_case_sha512_0(self):
        self.check('sha512', "",
                   "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
                   "47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")

    def test_case_sha512_1(self):
        self.check('sha512', "abc",
                   "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
                   "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")

    def test_case_sha512_2(self):
        self.check('sha512',
                   "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
                   "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
                   "501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")

    def test_case_sha512_3(self):
        self.check('sha512', "a" * 1000000,
                   "e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
                   "de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @test_support.reap_threads
    def test_threaded_hashing(self):
        # Updating the same hash object from several threads at once
        # using data chunk sizes containing the same byte sequences.
        #
        # If the internal locks are working to prevent multiple
        # updates on the same object from running at once, the resulting
        # hash will be the same as doing it single threaded upfront.
        hasher = hashlib.sha1()
        num_threads = 5
        smallest_data = 'swineflu'
        data = smallest_data*200000
        expected_hash = hashlib.sha1(data*num_threads).hexdigest()

        def hash_in_chunks(chunk_size, event):
            # Feed the shared hasher in fixed-size chunks, then signal done.
            index = 0
            while index < len(data):
                hasher.update(data[index:index+chunk_size])
                index += chunk_size
            event.set()

        events = []
        for threadnum in xrange(num_threads):
            # Each thread uses a different chunk size; all sizes divide the
            # repeated pattern so byte sequences coincide across threads.
            chunk_size = len(data) // (10**threadnum)
            assert chunk_size > 0
            assert chunk_size % len(smallest_data) == 0
            event = threading.Event()
            events.append(event)
            threading.Thread(target=hash_in_chunks,
                             args=(chunk_size, event)).start()

        for event in events:
            event.wait()

        self.assertEqual(expected_hash, hasher.hexdigest())
def test_main():
    """Entry point used by regrtest: run the full HashLibTestCase suite."""
    test_support.run_unittest(HashLibTestCase)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
McDermott-Group/LabRAD | LabRAD/TestScripts/fpgaTest/pyle/build/lib/pyle/dataking/resonator.py | 2 | 16727 | '''
Created on Sep 3, 2011
@author: Daniel Sank
'''
import numpy as np
from pyle.dataking import util
from pyle.util import sweeptools as st
from pyle.dataking import envelopehelpers as eh
from pyle.dataking.fpgaseq import runQubits
from pyle.dataking import utilMultilevels
import pyle.envelopes as env
from pyle.plotting import dstools
from pyle.fitting import fitting
from pyle.util import structures
from pyle.plotting import dstools as ds
from pyle.pipeline import returnValue
from pyle.dataking import squid
#from pyle.dataking import multiqubit as mq
import sweeps
import labrad
from labrad.units import Unit
us,ns,MHz,GHz = (Unit(s) for s in ['us','ns','MHz','GHz'])
from scipy.special import erf,erfc
from scipy.optimize import leastsq
def resonatorSpectroscopy(Sample, measure, measureR, paramName, freqScan=None, pulseTime=None, uwaveAmp=None,
                          swapTime=300*ns, stats=600L,
                          name='Resonator spectroscopy', save=True, collect=False, noisy=True, update=True):
    """Resonant drive of a resonator with detection by resonator->qubit swap.

    For each (drive amplitude, drive frequency) point, the resonator is
    driven at a sideband offset from its carrier, then the qubit is swapped
    onto the resonator for ``swapTime`` and measured; P(|1>) maps out the
    resonator response.

    Parameters: measure/measureR index the qubit/resonator in the sample;
    paramName selects which calibrated swap amplitude to use.  If
    ``update`` is true, the fitted resonator frequency is written back to
    the registry via squid.adjust_frequency; if ``collect`` is true the raw
    sweep data is returned.

    NOTE(review): ``pulseTime`` is defaulted from r['spectroscopyLen'] but
    never used afterwards (func reads r['spectroscopyLen'] directly) —
    presumably intended to override the pulse length; verify.
    """
    sample, qubits = util.loadDeviceType(Sample, 'phaseQubit')
    sample, resonators, Resonators = util.loadDeviceType(Sample, 'resonator', write_access=True)
    q = qubits[measure]
    r = resonators[measureR]
    R = Resonators[measureR]  # registry-writable wrapper, used by update
    q['readout']=True
    if freqScan is None:
        # Default scan: +/-20 MHz around the current resonator frequency.
        f = st.nearest(r['freq'][GHz], 0.001)
        freqScan = st.r[f-0.02:f+0.02:0.0005,GHz]
    if uwaveAmp is None:
        uwaveAmp = r['spectroscopyAmp']
    if pulseTime is None:
        pulseTime = r['spectroscopyLen']
    axes = [(uwaveAmp, 'Microwave Amplitude'), (freqScan, 'Frequency')]
    kw = {'stats': stats}
    dataset = sweeps.prepDataset(sample, name, axes, measure=measure, kw=kw)
    swapAmp = q['swapAmp'+paramName]
    def func(server, amp, f):
        #Determine resonator parameters
        sidebandFreq = f-r['fc']
        r['spectroscopyAmp'] = amp
        #Excite resonator
        r.xy = eh.spectroscopyPulse(r, 0, sidebandFreq)
        #Use qubit to grab resonator excitation
        q.z = env.rect(r['spectroscopyLen']+20*ns, swapTime, swapAmp)+eh.measurePulse(q, r['spectroscopyLen']+swapTime+30*ns)
        #eh.correctCrosstalkZ(qubits)
        return runQubits(server, qubits+resonators, stats, probs=[1])
    data = sweeps.grid(func, axes, dataset=save and dataset, noisy=noisy)
    if update:
        squid.adjust_frequency(R, data, paramName='freq')
    if collect:
        return data
def resonatorT1(Sample,measure,measureR,paramName,delay=None,stats=1200L, name='resonator T1',
                save=True, collect=True, noisy=True, update=True, plot=False):
    """Measure resonator energy relaxation (T1) via two qubit swaps.

    Sequence per delay point: pi-pulse the qubit, swap its excitation into
    the resonator, idle for ``delay``, swap back, and measure.  The decay of
    P(|1>) versus delay gives the resonator T1, which is fitted and (when
    ``update``) stored as Resonator['calT1'].

    NOTE(review): despite collect=True in the signature, this function never
    returns the sweep data — confirm whether a return was intended.
    """
    sample, qubits = util.loadQubits(Sample)
    sample, resonators, Resonators = util.loadDeviceType(Sample,'resonator',write_access=True)
    qubit = qubits[measure]
    Resonator = Resonators[measureR]
    if delay is None:
        # Log-spaced delays (plus a few points at/below zero as a baseline).
        delay = structures.ValueArray(np.hstack((np.linspace(-0.01,0,5),np.logspace(-2,0.845,50))),'us')
        #delay = st.r[0:1:0.01,us]
    axes = [(delay, 'Idle time')]
    kw = {'stats': stats}
    dataset = sweeps.prepDataset(sample, paramName+' '+name, axes, measure=measure, kw=kw)
    swapTime = qubit['swapTime'+paramName]
    swapAmp = qubit['swapAmp'+paramName]
    def func(server, delay):
        # pi pulse -> swap in -> idle(delay) -> swap out -> measure
        qubit.xy = eh.mix(qubit, eh.piPulseHD(qubit, 0))
        qubit.z = env.rect(qubit.piLen/2, swapTime, swapAmp)
        qubit.z += env.rect(qubit.piLen/2+swapTime+delay, swapTime, swapAmp)
        qubit.z += eh.measurePulse(qubit, qubit.piLen/2+swapTime+delay+swapTime)
        qubit['readout'] = True
        return runQubits(server, qubits, stats, probs=[1])
    sweeps.grid(func, axes, dataset=save and dataset, noisy=noisy)
    if update or plot:
        # Re-fetch the just-taken dataset from the data vault and fit T1.
        with labrad.connect() as cxn:
            dv = cxn.data_vault
            dataset = dstools.getOneDeviceDataset(dv,-1,session=sample._dir,
                                                  deviceName='res',averaged=False)
        result = fitting.t1(dataset,timeRange=(20.0*ns,delay[-1]))
        if plot:
            fig = dstools.plotDataset1D(dataset.data,
                                        dataset.variables[0],dataset.variables[1],
                                        marker='.',markersize=15)
            ax = fig.get_axes()[0]
            ax.plot(dataset.data[:,0],
                    result['fitFunc'](dataset.data[:,0],result['fit']),'r',
                    linewidth=3)
            ax.grid()
            fig.show()
        if update:
            Resonator['calT1']=result['T1']
def testDelay(Sample, measure, measureR, resName, startTime=st.r[-80:80:2,ns], pulseLength=8*ns,
              amp=0.2, stats=1200L, save=True, collect=False, noisy=True, plot=True, update=True,
              name='Resonator test delay'):
    """Two widely spaced half-cycle, out-of-phase pulses on the resonator
    with detection in between.

    Sweeps the start time of the pulse pair relative to the fixed qubit
    swap/measure window; fitting the resulting square-pulse response
    yields the microwave-line timing lag, which is written back to
    R['timingLagUwave'] when ``update`` is set.
    """
    sample, qubits = util.loadDeviceType(Sample, 'phaseQubit')
    sample, resonators, Resonators = util.loadDeviceType(Sample, 'resonator', write_access=True)
    q = qubits[measure]
    r = resonators[measureR]
    R = Resonators[measureR]
    axes = [(startTime, 'uWave Start Time')]
    kw = {'stats':stats, 'uwaveAmplitude':amp}
    dataset = sweeps.prepDataset(sample, name, axes, measure=measure, kw=kw)
    def func(server,start):
        # Positive-amplitude gaussian before 'start', negative one after.
        # (r'freq' is just the raw string 'freq', same key as below.)
        r.xy=eh.mix(
            r,env.gaussian(start-2.0*pulseLength, pulseLength, amp),freq=r'freq')+eh.mix(r,env.gaussian(start+2.0*pulseLength, pulseLength, amp=-amp), freq='freq')
        q.z = env.rect(-q[resName+'SwapTime']/2, q[resName+'SwapTime'], q[resName+'SwapAmp'])+eh.measurePulse(q,q[resName+'SwapTime']/2+100*ns)
        q['readout']=True
        return runQubits(server,qubits+resonators, stats, probs=[1])
    data = sweeps.grid(func,axes,dataset=save and dataset, noisy=noisy)
    # Shape parameters for the square-pulse fit of the response.
    topLen = 5*pulseLength
    transLen = pulseLength/2.0
    if plot or update:
        with labrad.connect() as cxn:
            dv = cxn.data_vault
            dataset = dstools.getOneDeviceDataset(dv,-1,session=sample._dir,
                                                  deviceName='r',averaged=False)
        result = fitting.squarePulse(dataset, topLen, transLen,
                                     timeRange=(-80*ns,80*ns))
        offset = result['horzOffset']
        print 'uwave lag: %f ns'%-offset['ns']
        if plot:
            fig = ds.plotDataset1D(dataset.data,dataset.variables[0],dataset.variables[1])
            ax = fig.get_axes()[0]
            ax.plot(dataset.data[:,0],result['fitFunc'](dataset.data[:,0],result['fit']),
                    'g',linewidth=4)
            ax.grid()
            fig.show()
        if update:
            print 'uwave lag corrected by %f ns'%offset['ns']
            R['timingLagUwave']+=offset
    if collect:
        return data
def fockScan(Sample, measure, measureR, paramName, n=1, scanLen=st.r[0:100:1,ns], detuneAmp=0.0,
             excited=False, scanOS=0.0, stats=1500L, buffer=0*ns, piDelay=0.0*ns, measureState=1,
             name=None, save=True, collect=False, noisy=True, probe=True):
    """Prepare the Fock state |n> in the resonator and scan the qubit
    probe-swap length (and optionally overshoot / idle detuning).

    The |n> state is built by n repetitions of (qubit excitation, calibrated
    swap of duration fockTimes[k]).  With ``probe`` the qubit is then swapped
    back onto the resonator for a variable time; otherwise it idles at
    ``detuneAmp``.  ``excited`` adds an extra qubit pi-pulse before probing.

    NOTE(review): the parameter name ``buffer`` shadows the builtin of the
    same name (harmless here, but worth renaming in a future API revision).
    """
    sample, qubits = util.loadDeviceType(Sample, 'phaseQubit')
    #sample, resonators = util.loadDeviceType(Sample, 'resonator')
    q = qubits[measure]
    # Ensure the per-level calibration keys for this paramName exist.
    utilMultilevels.setMultiKeys(q,2)
    utilMultilevels.setMultiKey(q,paramName+'FockTime',1)
    utilMultilevels.setMultiKey(q,paramName+'FockOvershoot',1)
    fockTimes = q['multiLevels'][paramName+'FockTime']
    fockOvershoots = q['multiLevels'][paramName+'FockOvershoot']
    swapAmp = q[paramName+'SwapAmp']
    if name is None:
        name = '%s Fock state |%d> swap length' %(paramName,n)
    axes = [(detuneAmp,'Idle detune amp'),(scanLen, 'Swap length adjust'),(scanOS, 'Amplitude overshoot')]
    kw = {'stats': stats, 'measureState':measureState}
    dataset = sweeps.prepDataset(sample, name, axes, measure=measure, kw=kw)
    def func(server,idleAmp,probeTime,overshoot):
        t=0*ns
        q.xy=env.NOTHING
        q.z=env.NOTHING
        #Put the resonator in the |n> state
        for fockState in range(n):
            q.xy += eh.boostState(q,t+q['piLen']/2,state=1)
            t += q['piLen']+buffer+piDelay
            q.z += env.rect(t, fockTimes[fockState],swapAmp)#,overshoot=fockOvershoots[fockState])
            t += fockTimes[fockState]+buffer+4*ns
        #Probe the resonator with the qubit for variable time and with variable overshoot
        if excited:
            q.xy += eh.mix(q, eh.piPulseHD(q, t+q.piLen/2))
            t += q.piLen+buffer
        if probe:
            q.z += env.rect(t, probeTime, swapAmp, overshoot=overshoot)
            t += probeTime+buffer+4*ns
        else:
            q.z += env.rect(t,probeTime,idleAmp)
            t += probeTime
        #Measure the qubit
        q.z += eh.measurePulse(q, t, state=measureState)
        q['readout'] = True
        return runQubits(server, qubits, stats, probs=[1])
    return sweeps.grid(func, axes, dataset=save and dataset, noisy=noisy, collect=collect)
def coherentScan(Sample, measure, measureR, deviceName, pulseAmp=None, pulseTime=None,
                 swapTime=st.r[0:100:1,ns], excited=False,
                 stats=1200, name=None, save=True, collect=True, noisy=True):
    """Drive the resonator into a coherent state, then swap onto the qubit
    for a variable time and measure.

    For each (swapTime, pulseTime, pulseAmp) point: a flattop drive pulse
    populates the resonator, then the qubit is biased to the calibrated
    swap point for ``swapTime`` and measured in state |1>.

    Returns the sweep data when ``collect`` is true (the default).

    NOTE(review): ``excited`` only affects the dataset name; no extra qubit
    pi-pulse is applied — confirm whether that was intended.
    """
    sample,qubits = util.loadDeviceType(Sample, 'phaseQubit')
    sample,resonators = util.loadDeviceType(Sample, 'resonator')
    q,r = qubits[measure],resonators[measureR]
    q['readout']=True
    # Fall back to the resonator's calibrated drive parameters.
    if pulseAmp is None:
        pulseAmp = r['coherentPulseAmp']
    if pulseTime is None:
        pulseTime = r['coherentPulseTime']
    if name is None:
        name = 'Coherent state swap, q=|%s>' %('e' if excited else 'g')
    axes = [(swapTime, 'Swap Time'),(pulseTime, 'Pulse Time'),(pulseAmp, 'Pulse Amplitude')]
    kw = {'stats':stats}
    dataset = sweeps.prepDataset(sample, name, axes, measure=measure, kw=kw)
    def func(server,swapTime,pulseTime,pulseAmp):
        t=0*ns
        # Coherent drive on the resonator, then qubit swap and measurement.
        r.xy = eh.mix(r,env.flattop(0, pulseTime, r['piFWHM'], pulseAmp), freq=r['freq'])
        t += pulseTime+r['piFWHM']
        q.z = env.rect(t, swapTime, q[deviceName+'SwapAmp'])
        t += swapTime+10*ns
        q.z += eh.measurePulse(q, t, state=1)
        return runQubits(server, qubits+resonators, stats, probs=[1])
    data = sweeps.grid(func, axes, dataset=save and dataset, noisy=noisy)
    # BUG FIX: previously this read `if noisy: return data`, so with the
    # default collect=True / noisy=False the data was silently dropped.
    # Every sibling scan in this module gates the return on `collect`.
    if collect:
        return data
#Yi's function. Need to massage for use in main code
def arbitraryWigner(Sample, measure, measureR, paramName, swapTimes=None,
                    offresTimes=None, pulseAngles=None, pulsePhases=None, alpha=[0.5], probePulseLength=st.r[0:300:2,ns],
                    stats=1200L, SZ=0.0, probePulseDelay=0*ns,
                    name='arbitrary Wigner', save=True, collect=False, noisy=True):
    """Make an arbitrary state in the resonator and measure the resonator Wigner function.

    The state is synthesized by a sequence of qubit excitations and
    qubit<->resonator swaps (one per entry of swapTimes); the resonator is
    then displaced by each amplitude in ``alpha`` and probed with a
    variable-length qubit swap.

    offresTimes - [num,...]: Times to wait with qubit off resonance from the
        resonator before each swap event.
    alpha - iterable of complex displacement amplitudes for the Wigner probe.
        (Mutable default [0.5] is safe here: it is only read, never mutated.)

    NOTE(review): the original docstring documented a ``pulseAmplitudes``
    parameter that does not exist; the per-pulse controls are pulseAngles /
    pulsePhases (currently unused — see the commented-out rotPulseHD line).
    ``SZ`` is accepted but never used.
    """
    sample, qubits = util.loadDeviceType(Sample, 'phaseQubit')
    sample, resonators = util.loadDeviceType(Sample, 'resonator')
    q = qubits[measure]
    r = resonators[measureR]
    if swapTimes is None:
        swapTimes = [q[paramName+'SwapTime']]
    nPulses = len(swapTimes)
    delay = q.piLen/2
    rdelay = r.piLen/2
    qf = q['f10'] #Qubit frequency
    rf = r['freq'] #Resonator frequency
    sa = float(q[paramName+'SwapAmp'])
    os = float(q[paramName+'SwapOvershoot'])+np.zeros(nPulses)
    #sweepPara = [[d,sT] for d in np.array(alpha) for sT in probePulseLength]
    def sweep():
        # Generator of (displacement, probe length) sweep points.
        for d in np.array(alpha):
            for ppl in probePulseLength:
                yield (d,ppl)
    kw = {'stats': stats}
    if offresTimes is None:
        offresTimes = np.resize(2*delay,nPulses)
    else:
        kw['times off resonance'] = offresTimes
    if pulseAngles is None:
        pulseAngles = np.resize(np.pi,nPulses)
    if pulsePhases is None:
        pulsePhases = np.resize(0.0, nPulses)
    if probePulseDelay is not None:
        kw['probe swap pulse delay'] = probePulseDelay
    else:
        probePulseDelay = 0*ns
    axes = [('rm displacement', 're'),('rm displacement', 'im'),('swap pulse length', 'ns')]
    dataset = sweeps.prepDataset(sample, name, axes, measure=measure, kw=kw)
    def func(server, args):
        # Runs as a labrad-style generator: yields runQubits, then
        # hands the assembled row back through returnValue.
        displacementAmp = args[0]
        currLen = args[1]
        time = 0.0
        q.xy = env.NOTHING
        q.z=env.NOTHING
        #Put the resonator in an arbitrary state with a series of qubit pulses and swaps
        for i in np.arange(len(swapTimes)):
            #q.xy += eh.mix(q, eh.rotPulseHD(q, time-delay, angle=pulseAngles[i], phase=pulsePhases[i]+2.0j*np.pi*(qf-rf)*time, alpha=0.5, state=1))
            q.xy += eh.boostState(q, time, 1)
            time += q['piLen']
            q.z += env.rect(time, swapTimes[i], sa,overshoot = os[i])
            time += swapTimes[i]
            time += offresTimes[i]
        resstart = time + probePulseDelay + 2*rdelay
        #Try to dump the qubit population in case there is any
        ##REMOVED##
        #Drive the resonator
        #The conjugate on the displacement amplitude and drive phase is there because you're driving FROM displacementAmp
        #back to the origin!
        r.xy = eh.mix(r, env.gaussian(resstart - rdelay, r.piFWHM, np.conjugate(displacementAmp*r['timingPhase'])), freq='freq')
        time += 4
        time = max(time, resstart)
        #Probe the resonator with the qubit
        q.z += env.rect(time+12, currLen, sa, overshoot=q[paramName+'SwapOvershoot'])
        time += currLen + 4+12
        q.z += eh.measurePulse(q, time)
        q['readout'] = True
        data = yield runQubits(server, qubits, stats=stats, probs=[1])
        data = np.hstack(([displacementAmp.real, displacementAmp.imag, currLen], data))
        returnValue(data)
    result = sweeps.run(func, sweep(), dataset=save and dataset, noisy=noisy)
    if collect:
        return result
def resdrivephase(Sample, measure, measureR, paramName, tf,points=400, stats=1500, unitAmpl=0.3,
tuneOS=False, name='resonator drive phase', save=True, collect=True, noisy=True):
sample, qubits, Qubits = util.loadDeviceType(Sample, 'phaseQubit', write_access=True)
sample, resonators, Resonators = util.loadDeviceType
q = qubits[measure]
r = resonators[measureR]
R = Resonators[measureR]
angle = np.linspace(0,2*np.pi, points, endpoint=False)
displacement=unitAmpl*np.exp(1j*angle)
if tuneOS:
os = q[paramName+'SwapAmpOvershoot']
else:
os = 0.0
# kw = {'stats': stats}
# dataset = sweeps.prepDataset(sample, name, axes=[('displacement','re'),('displacement','im')], measure=measure, kw=kw)
# def func(server, curr):
# start = 0
# q.xy = eh.mix(q, env.gaussian(start, q.piFWHM, amp = q.piAmp/2), freq = 'f10')
# start += q.piLen/2
# q.z = env.rect(start, q['swapLen'+paraName], q['swapAmp'+paraName],overshoot = os)
# start += q['swapLen'+paraName]+r.piLen/2
# r.xy = eh.mix(r, env.gaussian(start,r.piFWHM, amp = np.conjugate(curr*r.drivePhase)), freq = 'fRes0')
# start += r.piLen/2
# q.z += env.rect(start, q['swapLen'+paraName], q['swapAmp'+paraName],overshoot = os)
# start += q['swapLen'+paraName]
# q.z += eh.measurePulse(q, start)
# q['readout'] = True
# data = yield runQubits(server, qubits, stats, probs=[1])
# data = np.hstack(([curr.real, curr.imag], data))
# returnValue(data)
# result = sweeps.run(func, displacement, dataset=save and dataset, noisy=noisy)
result = arbitraryWigner(sample, measure, measureR, paramName, np.array([tf(1.0)])*ns, pulseAmplitudes=[0.5*q.piAmp],
probePulseLength = st.r[tf(1.0):tf(1.0):1,ns], alpha=displacement,
stats=stats, save=False, collect=True, noisy=False, name='Resonator drive phase')
result = result[:, [0,3]]
result[:,0] = angle
def fitfunc(angle,p):
return p[0]+p[1]*np.cos(angle-p[2])
def errfunc(p):
return result[:,1]-fitfunc(result[:,0],p)
p,ok = leastsq(errfunc, [0.0,100.0,0.0])
if p[1] < 0:
p[1] = -p[1]
p[2] = p[2]+np.pi
p[2] = (p[2]+np.pi)%(2*np.pi)-np.pi
plt.plot(result[:,0],result[:,1])
plt.plot(angle, fitfunc(angle,p))
a = r.drivePhase*np.exp(1.0j*p[2])
print 'Resonator drive Phase correction: %g' % p[2]
R.drivePhase = a/abs(a)
return
| gpl-2.0 |
jpshort/odoo | addons/sale_stock/company.py | 384 | 1524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class company(osv.osv):
    """Extend res.company with a sales/stock scheduling safety margin.

    `security_lead` (days) is subtracted from promised customer dates when
    scheduling procurement and delivery, absorbing supply-chain delays.
    """
    _inherit = 'res.company'
    _columns = {
        'security_lead': fields.float(
            'Security Days', required=True,
            help="Margin of error for dates promised to customers. "\
                 "Products will be scheduled for procurement and delivery "\
                 "that many days earlier than the actual promised date, to "\
                 "cope with unexpected delays in the supply chain."),
    }
    # New companies start with no safety margin.
    _defaults = {
        'security_lead': 0.0,
    }
| agpl-3.0 |
r-o-b-b-i-e/pootle | tests/commands/purge_user.py | 8 | 1273 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from django.core.management import call_command
from django.core.management.base import CommandError
@pytest.mark.cmd
def test_purge_user_nouser():
    """Running purge_user with no username must fail with a usage error."""
    with pytest.raises(CommandError) as excinfo:
        call_command('purge_user')
    assert "too few arguments" in str(excinfo)
@pytest.mark.cmd
@pytest.mark.django_db
def test_purge_user_single_user(capfd, evil_member):
    """Purging one user reports the purge on stdout."""
    call_command('purge_user', 'evil_member')
    stdout, stderr = capfd.readouterr()
    assert "User purged: evil_member" in stdout
@pytest.mark.cmd
@pytest.mark.django_db
def test_purge_user_multiple_users(capfd, member, member2):
    """Purging several users at once reports each purge on stdout."""
    call_command('purge_user', 'member', 'member2')
    stdout, stderr = capfd.readouterr()
    assert "User purged: member " in stdout
    assert "User purged: member2 " in stdout
@pytest.mark.cmd
@pytest.mark.django_db
def test_purge_user_unkownuser():
    """An unknown username must raise CommandError naming that user."""
    with pytest.raises(CommandError) as excinfo:
        call_command('purge_user', 'not_a_user')
    assert "User not_a_user does not exist" in str(excinfo)
| gpl-3.0 |
phanikiran2/Extending-RED-qdisc-in-ns3-to-support--NLRED | waf-tools/boost.py | 88 | 13889 | #!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld <ruediger@c-plusplus.de>, 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
Options are generated, in order to specify the location of boost includes/libraries.
The `check_boost` configuration function allows to specify the used boost libraries.
It can also provide default arguments to the --boost-mt command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
  So before calling `conf.check_boost` you might want to disable it by adding
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensuring having a correct linkage in some basic cases.
'''
import sys
import re
from waflib import Utils, Logs, Errors
from waflib.Configure import conf
from waflib.TaskGen import feature, after_method
# Default directories searched for boost libraries / headers when the user
# gives no --boost-libs / --boost-includes option.
BOOST_LIBS = ['/usr/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu',
              '/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
# Header used both to detect a boost install and to read its version string.
BOOST_VERSION_FILE = 'boost/version.hpp'
# Fallback C++ snippet: compiled+run to print BOOST_LIB_VERSION when the
# version header cannot be parsed directly.
BOOST_VERSION_CODE = '''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << std::endl; }
'''
# Link-check snippets for boost.system and boost.thread.
BOOST_ERROR_CODE = '''
#include <boost/system/error_code.hpp>
int main() { boost::system::error_code c; }
'''
BOOST_THREAD_CODE = '''
#include <boost/thread.hpp>
int main() { boost::thread t; }
'''
# toolsets from {boost_dir}/tools/build/v2/tools/common.jam
PLATFORM = Utils.unversioned_sys_platform()
# Some toolset tags depend on the platform or compiler flavor, so the map
# values below are either literal tag strings or env->tag callables.
detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il'
detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang'
detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc'
BOOST_TOOLSETS = {
    'borland':  'bcb',
    'clang':    detect_clang,
    'como':     'como',
    'cw':       'cw',
    'darwin':   'xgcc',
    'edg':      'edg',
    'g++':      detect_mingw,
    'gcc':      detect_mingw,
    'icpc':     detect_intel,
    'intel':    detect_intel,
    'kcc':      'kcc',
    'kylix':    'bck',
    'mipspro':  'mp',
    'mingw':    'mgw',
    'msvc':     'vc',
    'qcc':      'qcc',
    'sun':      'sw',
    'sunc++':   'sw',
    'tru64cxx': 'tru',
    'vacpp':    'xlc'
}
def options(opt):
    """Register the --boost-* command-line options on the waf option parser."""
    opt.add_option('--boost-includes', type='string',
                   default='', dest='boost_includes',
                   help='''path to the boost includes root (~boost root)
                   e.g. /path/to/boost_1_47_0''')
    opt.add_option('--boost-libs', type='string',
                   default='', dest='boost_libs',
                   help='''path to the directory where the boost libs are
                   e.g. /path/to/boost_1_47_0/stage/lib''')
    opt.add_option('--boost-mt', action='store_true',
                   default=False, dest='boost_mt',
                   help='select multi-threaded libraries')
    opt.add_option('--boost-abi', type='string', default='', dest='boost_abi',
                   help='''select libraries with tags (gd for debug, static is automatically added),
                   see doc Boost, Getting Started, chapter 6.1''')
    opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect',
                   help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
    opt.add_option('--boost-toolset', type='string',
                   default='', dest='boost_toolset',
                   help='force a toolset e.g. msvc, vc90, \
                        gcc, mingw, mgw45 (default: auto)')
    # Default the boost.python suffix to the running interpreter's version.
    py_version = '%d%d' % (sys.version_info[0], sys.version_info[1])
    opt.add_option('--boost-python', type='string',
                   default=py_version, dest='boost_python',
                   help='select the lib python with this version \
                        (default: %s)' % py_version)
@conf
def __boost_get_version_file(self, d):
	"""Return the node of boost's version header under directory *d*, or None."""
	if d:
		folder = self.root.find_dir(d)
		if folder:
			return folder.find_node(BOOST_VERSION_FILE)
	return None
@conf
def boost_get_version(self, d):
	"""Silently retrieve the boost version number.

	First tries to parse BOOST_LIB_VERSION out of the version header
	located under *d*; falls back to compiling and running a small test
	program when the header cannot be read or parsed.
	"""
	version_node = self.__boost_get_version_file(d)
	if version_node:
		contents = None
		try:
			contents = version_node.read()
		except EnvironmentError:
			Logs.error("Could not read the file %r" % version_node.abspath())
		if contents is not None:
			match = re.search('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"', contents, re.M)
			if match:
				return match.group(1)
	return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True)
@conf
def boost_get_includes(self, *k, **kw):
	"""Return the first directory containing the boost version header.

	The directory passed positionally or via ``includes=`` is tried
	first, then every entry of the INCLUDE environment variable and
	the default search paths.  Fails the configuration otherwise.
	"""
	includes = k and k[0] or kw.get('includes', None)
	candidates = []
	if includes:
		candidates.append(includes)
	candidates.extend(self.environ.get('INCLUDE', '').split(';'))
	candidates.extend(BOOST_INCLUDES)
	for d in candidates:
		if self.__boost_get_version_file(d):
			return d
	if includes:
		self.end_msg('headers not found in %s' % includes)
	else:
		self.end_msg('headers not found, please provide a --boost-includes argument (see help)')
	self.fatal('The configuration failed')
@conf
def boost_get_toolset(self, cc):
	"""Return the boost toolset tag (e.g. 'vc', 'mgw', 'il') for compiler *cc*.

	When *cc* is empty, it is guessed from the build platform, falling
	back to the detected C++ compiler name.  BOOST_TOOLSETS values are
	either the tag itself or a callable computing it from the env.
	"""
	toolset = cc
	if not cc:
		build_platform = Utils.unversioned_sys_platform()
		if build_platform in BOOST_TOOLSETS:
			cc = build_platform
		else:
			cc = self.env.CXX_NAME
	if cc in BOOST_TOOLSETS:
		toolset = BOOST_TOOLSETS[cc]
	# The previous 'isinstance(...) and toolset or toolset(self.env)' idiom
	# broke when toolset resolved to an empty string: the falsy string was
	# treated as a callable and raised TypeError.  Branch explicitly.
	if isinstance(toolset, str):
		return toolset
	return toolset(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
	"""Return the boost library directory node and the files inside it.

	An explicit ``files=`` keyword short-circuits the search.  Otherwise
	the directory passed positionally or via ``libs=`` is tried first,
	then each entry of the LIB environment variable and the default
	library paths (plus their '64' variants).  Fails the configuration
	when no directory with boost libraries is found.
	"""
	if 'files' in kw:
		return self.root.find_dir('.'), Utils.to_list(kw['files'])
	libs = k and k[0] or kw.get('libs', None)
	# Start from a defined state: previously 'path'/'files' could be
	# referenced while unbound (NameError) when no candidate directory
	# existed, and a bad --boost-libs dir crashed on ant_glob (None).
	path = None
	files = []
	if libs:
		path = self.root.find_dir(libs)
		if path:
			files = path.ant_glob('*boost_*')
	if not libs or not files:
		for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS:
			if not d:
				continue
			path = self.root.find_dir(d)
			if path:
				files = path.ant_glob('*boost_*')
				if files:
					break
			# library directories often come in a '64' flavor on 64-bit hosts
			path = self.root.find_dir(d + '64')
			if path:
				files = path.ant_glob('*boost_*')
				if files:
					break
	if not path:
		if libs:
			self.end_msg('libs not found in %s' % libs)
			self.fatal('The configuration failed')
		else:
			self.end_msg('libs not found, please provide a --boost-libs argument (see help)')
			self.fatal('The configuration failed')
	self.to_log('Found the boost path in %r with the libraries:' % path)
	for x in files:
		self.to_log('    %r' % x)
	return path, files
@conf
def boost_get_libs(self, *k, **kw):
	'''
	return the lib path and the required libs
	according to the parameters
	'''
	path, files = self.__boost_get_libs_path(**kw)
	# longer (more specific) file names are tried first
	files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True)
	toolset = self.boost_get_toolset(kw.get('toolset', ''))
	# toolset tag optionally followed by up to three version digits, e.g. '-vc110'
	toolset_pat = '(-%s[0-9]{0,3})' % toolset
	version = '-%s' % self.env.BOOST_VERSION
	def find_lib(re_lib, files):
		# first file whose name matches the compiled pattern, or None
		for file in files:
			if re_lib.search(file.name):
				self.to_log('Found boost lib %s' % file)
				return file
		return None
	def format_lib_name(name):
		# drop the 'lib' prefix (except for msvc) and the file extension
		if name.startswith('lib') and self.env.CC_NAME != 'msvc':
			name = name[3:]
		return name[:name.rfind('.')]
	def match_libs(lib_names, is_static):
		# resolve each requested boost component to an on-disk library name
		libs = []
		lib_names = Utils.to_list(lib_names)
		if not lib_names:
			return libs
		# optional name tags: threading model, ABI, static marker
		t = []
		if kw.get('mt', False):
			t.append('-mt')
		if kw.get('abi', None):
			t.append('%s%s' % (is_static and '-s' or '-', kw['abi']))
		elif is_static:
			t.append('-s')
		tags_pat = t and ''.join(t) or ''
		ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN
		ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN
		for lib in lib_names:
			if lib == 'python':
				# for instance, with python='27',
				# accepts '-py27', '-py2', '27' and '2'
				# but will reject '-py3', '-py26', '26' and '3'
				tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'])
			else:
				tags = tags_pat
			# Trying libraries, from most strict match to least one
			for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext),
							'boost_%s%s%s%s$' % (lib, tags, version, ext),
							# Give up trying to find the right version
							'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext),
							'boost_%s%s%s$' % (lib, tags, ext),
							'boost_%s%s$' % (lib, ext),
							'boost_%s' % lib]:
				self.to_log('Trying pattern %s' % pattern)
				file = find_lib(re.compile(pattern), files)
				if file:
					libs.append(format_lib_name(file.name))
					break
			else:
				# no pattern matched this component anywhere in the lib dir
				self.end_msg('lib %s not found in %s' % (lib, path.abspath()))
				self.fatal('The configuration failed')
		return libs
	return path.abspath(), match_libs(kw.get('lib', None), False), match_libs(kw.get('stlib', None), True)
@conf
def check_boost(self, *k, **kw):
	"""
	Initialize boost libraries to be used.
	Keywords: you can pass the same parameters as with the command line (without "--boost-").
	Note that the command line has the priority, and should preferably be used.
	"""
	if not self.env['CXX']:
		self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
	params = {
		'lib': k and k[0] or kw.get('lib', None),
		'stlib': kw.get('stlib', None)
	}
	# merge the --boost-* command-line options; a set option wins over
	# the keyword argument of the same name
	for key, value in self.options.__dict__.items():
		if not key.startswith('boost_'):
			continue
		key = key[len('boost_'):]
		params[key] = value and value or kw.get(key, '')
	var = kw.get('uselib_store', 'BOOST')
	self.start_msg('Checking boost includes')
	self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
	self.env.BOOST_VERSION = self.boost_get_version(inc)
	self.end_msg(self.env.BOOST_VERSION)
	if Logs.verbose:
		Logs.pprint('CYAN', '	path : %s' % self.env['INCLUDES_%s' % var])
	# headers-only usage: nothing more to configure
	if not params['lib'] and not params['stlib']:
		return
	if 'static' in kw or 'static' in params:
		Logs.warn('boost: static parameter is deprecated, use stlib instead.')
	self.start_msg('Checking boost libs')
	path, libs, stlibs = self.boost_get_libs(**params)
	self.env['LIBPATH_%s' % var] = [path]
	self.env['STLIBPATH_%s' % var] = [path]
	self.env['LIB_%s' % var] = libs
	self.env['STLIB_%s' % var] = stlibs
	self.end_msg('ok')
	if Logs.verbose:
		Logs.pprint('CYAN', '	path : %s' % path)
		Logs.pprint('CYAN', '	shared libs : %s' % libs)
		Logs.pprint('CYAN', '	static libs : %s' % stlibs)
	def try_link():
		# compile/link small probes for the boost components that need it
		if (params['lib'] and 'system' in params['lib']) or \
			params['stlib'] and 'system' in params['stlib']:
			self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False)
		if (params['lib'] and 'thread' in params['lib']) or \
			params['stlib'] and 'thread' in params['stlib']:
			self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False)
	if params.get('linkage_autodetect', False):
		self.start_msg("Attempting to detect boost linkage flags")
		toolset = self.boost_get_toolset(kw.get('toolset', ''))
		if toolset in ('vc',):
			# disable auto-linking feature, causing error LNK1181
			# because the code wants to be linked against
			self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
			# if no dlls are present, we guess the .lib files are not stubs
			has_dlls = False
			for x in Utils.listdir(path):
				if x.endswith(self.env.cxxshlib_PATTERN % ''):
					has_dlls = True
					break
			if not has_dlls:
				# static-only install: move the libs to the static variables
				self.env['STLIBPATH_%s' % var] = [path]
				self.env['STLIB_%s' % var] = libs
				del self.env['LIB_%s' % var]
				del self.env['LIBPATH_%s' % var]
			# we attempt to play with some known-to-work CXXFLAGS combinations
			for cxxflags in (['/MD', '/EHsc'], []):
				# stash/revert keeps the env clean between attempts
				self.env.stash()
				self.env["CXXFLAGS_%s" % var] += cxxflags
				try:
					try_link()
					self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
					exc = None
					break
				except Errors.ConfigurationError as e:
					self.env.revert()
					exc = e
			if exc is not None:
				self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc)
				self.fatal('The configuration failed')
		else:
			self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
			self.fatal('The configuration failed')
	else:
		self.start_msg('Checking for boost linkage')
		try:
			try_link()
		except Errors.ConfigurationError as e:
			self.end_msg("Could not link against boost libraries using supplied options")
			self.fatal('The configuration failed')
		self.end_msg('ok')
@feature('cxx')
@after_method('apply_link')
def install_boost(self):
	"""On Windows install commands, copy the boost DLLs next to the binaries.

	Runs at most once per build (guarded by the function attribute
	``install_boost.done``) and only when the current command is an
	install variant on a win32 host.
	"""
	if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'):
		return
	install_boost.done = True
	inst_to = getattr(self, 'install_path', '${BINDIR}')
	for lib in self.env.LIB_BOOST:
		try:
			file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST)
			self.bld.install_files(inst_to, self.bld.root.find_node(file))
		except Exception:
			# best effort: a bare 'except:' here also swallowed
			# KeyboardInterrupt/SystemExit; skip only real lookup failures
			continue
install_boost.done = False
| gpl-2.0 |
rtrigoso/tastycakes | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_lassobuiltins.py | 293 | 130675 | # -*- coding: utf-8 -*-
"""
pygments.lexers._lassobuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Built-in Lasso types, traits, methods, and members.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTINS = {
'Types': [
'null',
'void',
'tag',
'trait',
'integer',
'decimal',
'boolean',
'capture',
'string',
'bytes',
'keyword',
'custom',
'staticarray',
'signature',
'memberstream',
'dsinfo',
'sourcefile',
'array',
'pair',
'opaque',
'filedesc',
'dirdesc',
'locale',
'ucal',
'xml_domimplementation',
'xml_node',
'xml_characterdata',
'xml_document',
'xml_element',
'xml_attr',
'xml_text',
'xml_cdatasection',
'xml_entityreference',
'xml_entity',
'xml_processinginstruction',
'xml_comment',
'xml_documenttype',
'xml_documentfragment',
'xml_notation',
'xml_nodelist',
'xml_namednodemap',
'xml_namednodemap_ht',
'xml_namednodemap_attr',
'xmlstream',
'sqlite3',
'sqlite3_stmt',
'mime_reader',
'curltoken',
'regexp',
'zip_impl',
'zip_file_impl',
'library_thread_loader',
'generateforeachunkeyed',
'generateforeachkeyed',
'eacher',
'queriable_where',
'queriable_select',
'queriable_selectmany',
'queriable_groupby',
'queriable_join',
'queriable_groupjoin',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_skip',
'queriable_take',
'queriable_grouping',
'generateseries',
'tie',
'pairup',
'delve',
'repeat',
'pair_compare',
'serialization_object_identity_compare',
'serialization_element',
'serialization_writer_standin',
'serialization_writer_ref',
'serialization_writer',
'serialization_reader',
'tree_nullnode',
'tree_node',
'tree_base',
'map_node',
'map',
'file',
'date',
'dir',
'magick_image',
'ldap',
'os_process',
'java_jnienv',
'jobject',
'jmethodid',
'jfieldid',
'database_registry',
'sqlite_db',
'sqlite_results',
'sqlite_currentrow',
'sqlite_table',
'sqlite_column',
'curl',
'debugging_stack',
'dbgp_server',
'dbgp_packet',
'duration',
'inline_type',
'json_literal',
'json_object',
'list_node',
'list',
'jchar',
'jchararray',
'jbyte',
'jbytearray',
'jfloat',
'jint',
'jshort',
'currency',
'scientific',
'percent',
'dateandtime',
'timeonly',
'net_tcp',
'net_tcpssl',
'net_named_pipe',
'net_udppacket',
'net_udp',
'pdf_typebase',
'pdf_doc',
'pdf_color',
'pdf_barcode',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_hyphenator',
'pdf_chunk',
'pdf_phrase',
'pdf_paragraph',
'queue',
'set',
'sys_process',
'worker_pool',
'zip_file',
'zip',
'cache_server_element',
'cache_server',
'dns_response',
'component_render_state',
'component',
'component_container',
'document_base',
'document_body',
'document_header',
'text_document',
'data_document',
'email_compose',
'email_pop',
'email_parse',
'email_queue_impl_base',
'email_stage_impl_base',
'fcgi_record',
'web_request_impl',
'fcgi_request',
'include_cache',
'atbegin',
'fastcgi_each_fcgi_param',
'fastcgi_server',
'filemaker_datasource',
'http_document',
'http_document_header',
'http_header_field',
'html_document_head',
'html_document_body',
'raw_document_body',
'bytes_document_body',
'html_attr',
'html_atomic_element',
'html_container_element',
'http_error',
'html_script',
'html_text',
'html_raw',
'html_binary',
'html_json',
'html_cdata',
'html_eol',
'html_div',
'html_span',
'html_br',
'html_hr',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_meta',
'html_link',
'html_object',
'html_style',
'html_base',
'html_table',
'html_tr',
'html_td',
'html_th',
'html_img',
'html_form',
'html_fieldset',
'html_legend',
'html_input',
'html_label',
'html_option',
'html_select',
'http_server_web_connection',
'http_server',
'http_server_connection_handler',
'image',
'lassoapp_installer',
'lassoapp_content_rep_halt',
'lassoapp_dirsrc_fileresource',
'lassoapp_dirsrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_livesrc_appsource',
'lassoapp_long_expiring_bytes',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_fileresource',
'lassoapp_zipsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_compiledsrc_appsource',
'lassoapp_manualsrc_appsource',
'log_impl_base',
'portal_impl',
'security_registry',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'mysql_session_driver_impl',
'odbc_session_driver_impl',
'session_delete_expired_thread',
'email_smtp',
'client_address',
'client_ip',
'web_node_base',
'web_node_root',
'web_node_content_representation_xhr_container',
'web_node_content_representation_html_specialized',
'web_node_content_representation_css_specialized',
'web_node_content_representation_js_specialized',
'web_node_echo',
'web_error_atend',
'web_response_impl',
'web_router'
],
'Traits': [
'trait_asstring',
'any',
'trait_generator',
'trait_decompose_assignment',
'trait_foreach',
'trait_generatorcentric',
'trait_foreachtextelement',
'trait_finite',
'trait_finiteforeach',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_frontended',
'trait_backended',
'trait_doubleended',
'trait_positionallykeyed',
'trait_expandable',
'trait_frontexpandable',
'trait_backexpandable',
'trait_contractible',
'trait_frontcontractible',
'trait_backcontractible',
'trait_fullymutable',
'trait_keyedmutable',
'trait_endedfullymutable',
'trait_setoperations',
'trait_searchable',
'trait_positionallysearchable',
'trait_pathcomponents',
'trait_readbytes',
'trait_writebytes',
'trait_setencoding',
'trait_readstring',
'trait_writestring',
'trait_hashable',
'trait_each_sub',
'trait_stack',
'trait_list',
'trait_array',
'trait_map',
'trait_close',
'trait_file',
'trait_scalar',
'trait_queriablelambda',
'trait_queriable',
'queriable_asstring',
'trait_serializable',
'trait_treenode',
'trait_json_serialize',
'formattingbase',
'trait_net',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'web_connection',
'html_element_coreattrs',
'html_element_i18nattrs',
'html_element_eventsattrs',
'html_attributed',
'lassoapp_resource',
'lassoapp_source',
'lassoapp_capabilities',
'session_driver',
'web_node_content_json_specialized',
'web_node',
'web_node_container',
'web_node_content_representation',
'web_node_content',
'web_node_content_document',
'web_node_postable',
'web_node_content_html_specialized',
'web_node_content_css_specialized',
'web_node_content_js_specialized'
],
'Unbound Methods': [
'fail_now',
'register',
'register_thread',
'escape_tag',
'handle',
'handle_failure',
'protect_now',
'threadvar_get',
'threadvar_set',
'threadvar_set_asrt',
'threadvar_find',
'abort_now',
'abort_clear',
'failure_clear',
'var_keys',
'var_values',
'staticarray_join',
'suspend',
'main_thread_only',
'split_thread',
'capture_nearestloopcount',
'capture_nearestloopcontinue',
'capture_nearestloopabort',
'io_file_o_rdonly',
'io_file_o_wronly',
'io_file_o_rdwr',
'io_file_o_nonblock',
'io_file_o_sync',
'io_file_o_shlock',
'io_file_o_exlock',
'io_file_o_async',
'io_file_o_fsync',
'io_file_o_nofollow',
'io_file_s_irwxu',
'io_file_s_irusr',
'io_file_s_iwusr',
'io_file_s_ixusr',
'io_file_s_irwxg',
'io_file_s_irgrp',
'io_file_s_iwgrp',
'io_file_s_ixgrp',
'io_file_s_irwxo',
'io_file_s_iroth',
'io_file_s_iwoth',
'io_file_s_ixoth',
'io_file_s_isuid',
'io_file_s_isgid',
'io_file_s_isvtx',
'io_file_s_ifmt',
'io_file_s_ifchr',
'io_file_s_ifdir',
'io_file_s_ifreg',
'io_file_o_append',
'io_file_o_creat',
'io_file_o_trunc',
'io_file_o_excl',
'io_file_seek_set',
'io_file_seek_cur',
'io_file_seek_end',
'io_file_s_ififo',
'io_file_s_ifblk',
'io_file_s_iflnk',
'io_file_s_ifsock',
'io_net_shut_rd',
'io_net_shut_wr',
'io_net_shut_rdwr',
'io_net_sock_stream',
'io_net_sock_dgram',
'io_net_sock_raw',
'io_net_sock_rdm',
'io_net_sock_seqpacket',
'io_net_so_debug',
'io_net_so_acceptconn',
'io_net_so_reuseaddr',
'io_net_so_keepalive',
'io_net_so_dontroute',
'io_net_so_broadcast',
'io_net_so_useloopback',
'io_net_so_linger',
'io_net_so_oobinline',
'io_net_so_timestamp',
'io_net_so_sndbuf',
'io_net_so_rcvbuf',
'io_net_so_sndlowat',
'io_net_so_rcvlowat',
'io_net_so_sndtimeo',
'io_net_so_rcvtimeo',
'io_net_so_error',
'io_net_so_type',
'io_net_sol_socket',
'io_net_af_unix',
'io_net_af_inet',
'io_net_af_inet6',
'io_net_ipproto_ip',
'io_net_ipproto_udp',
'io_net_msg_peek',
'io_net_msg_oob',
'io_net_msg_waitall',
'io_file_fioclex',
'io_file_fionclex',
'io_file_fionread',
'io_file_fionbio',
'io_file_fioasync',
'io_file_fiosetown',
'io_file_fiogetown',
'io_file_fiodtype',
'io_file_f_dupfd',
'io_file_f_getfd',
'io_file_f_setfd',
'io_file_f_getfl',
'io_file_f_setfl',
'io_file_f_getlk',
'io_file_f_setlk',
'io_file_f_setlkw',
'io_file_fd_cloexec',
'io_file_f_rdlck',
'io_file_f_unlck',
'io_file_f_wrlck',
'io_dir_dt_unknown',
'io_dir_dt_fifo',
'io_dir_dt_chr',
'io_dir_dt_blk',
'io_dir_dt_reg',
'io_dir_dt_sock',
'io_dir_dt_wht',
'io_dir_dt_lnk',
'io_dir_dt_dir',
'io_file_access',
'io_file_chdir',
'io_file_getcwd',
'io_file_chown',
'io_file_lchown',
'io_file_truncate',
'io_file_link',
'io_file_pipe',
'io_file_rmdir',
'io_file_symlink',
'io_file_unlink',
'io_file_remove',
'io_file_rename',
'io_file_tempnam',
'io_file_mkstemp',
'io_file_dirname',
'io_file_realpath',
'io_file_chmod',
'io_file_mkdir',
'io_file_mkfifo',
'io_file_umask',
'io_net_socket',
'io_net_bind',
'io_net_connect',
'io_net_listen',
'io_net_recv',
'io_net_recvfrom',
'io_net_accept',
'io_net_send',
'io_net_sendto',
'io_net_shutdown',
'io_net_getpeername',
'io_net_getsockname',
'io_net_ssl_begin',
'io_net_ssl_end',
'io_net_ssl_shutdown',
'io_net_ssl_setverifylocations',
'io_net_ssl_usecertificatechainfile',
'io_net_ssl_useprivatekeyfile',
'io_net_ssl_connect',
'io_net_ssl_accept',
'io_net_ssl_error',
'io_net_ssl_errorstring',
'io_net_ssl_liberrorstring',
'io_net_ssl_funcerrorstring',
'io_net_ssl_reasonerrorstring',
'io_net_ssl_setconnectstate',
'io_net_ssl_setacceptstate',
'io_net_ssl_read',
'io_net_ssl_write',
'io_file_stat_size',
'io_file_stat_mode',
'io_file_stat_mtime',
'io_file_stat_atime',
'io_file_lstat_size',
'io_file_lstat_mode',
'io_file_lstat_mtime',
'io_file_lstat_atime',
'io_file_readlink',
'io_file_lockf',
'io_file_f_ulock',
'io_file_f_tlock',
'io_file_f_test',
'io_file_stdin',
'io_file_stdout',
'io_file_stderr',
'uchar_alphabetic',
'uchar_ascii_hex_digit',
'uchar_bidi_control',
'uchar_bidi_mirrored',
'uchar_dash',
'uchar_default_ignorable_code_point',
'uchar_deprecated',
'uchar_diacritic',
'uchar_extender',
'uchar_full_composition_exclusion',
'uchar_grapheme_base',
'uchar_grapheme_extend',
'uchar_grapheme_link',
'uchar_hex_digit',
'uchar_hyphen',
'uchar_id_continue',
'uchar_ideographic',
'uchar_ids_binary_operator',
'uchar_ids_trinary_operator',
'uchar_join_control',
'uchar_logical_order_exception',
'uchar_lowercase',
'uchar_math',
'uchar_noncharacter_code_point',
'uchar_quotation_mark',
'uchar_radical',
'uchar_soft_dotted',
'uchar_terminal_punctuation',
'uchar_unified_ideograph',
'uchar_uppercase',
'uchar_white_space',
'uchar_xid_continue',
'uchar_case_sensitive',
'uchar_s_term',
'uchar_variation_selector',
'uchar_nfd_inert',
'uchar_nfkd_inert',
'uchar_nfc_inert',
'uchar_nfkc_inert',
'uchar_segment_starter',
'uchar_pattern_syntax',
'uchar_pattern_white_space',
'uchar_posix_alnum',
'uchar_posix_blank',
'uchar_posix_graph',
'uchar_posix_print',
'uchar_posix_xdigit',
'uchar_bidi_class',
'uchar_block',
'uchar_canonical_combining_class',
'uchar_decomposition_type',
'uchar_east_asian_width',
'uchar_general_category',
'uchar_joining_group',
'uchar_joining_type',
'uchar_line_break',
'uchar_numeric_type',
'uchar_script',
'uchar_hangul_syllable_type',
'uchar_nfd_quick_check',
'uchar_nfkd_quick_check',
'uchar_nfc_quick_check',
'uchar_nfkc_quick_check',
'uchar_lead_canonical_combining_class',
'uchar_trail_canonical_combining_class',
'uchar_grapheme_cluster_break',
'uchar_sentence_break',
'uchar_word_break',
'uchar_general_category_mask',
'uchar_numeric_value',
'uchar_age',
'uchar_bidi_mirroring_glyph',
'uchar_case_folding',
'uchar_iso_comment',
'uchar_lowercase_mapping',
'uchar_name',
'uchar_simple_case_folding',
'uchar_simple_lowercase_mapping',
'uchar_simple_titlecase_mapping',
'uchar_simple_uppercase_mapping',
'uchar_titlecase_mapping',
'uchar_unicode_1_name',
'uchar_uppercase_mapping',
'u_wb_other',
'u_wb_aletter',
'u_wb_format',
'u_wb_katakana',
'u_wb_midletter',
'u_wb_midnum',
'u_wb_numeric',
'u_wb_extendnumlet',
'u_sb_other',
'u_sb_aterm',
'u_sb_close',
'u_sb_format',
'u_sb_lower',
'u_sb_numeric',
'u_sb_oletter',
'u_sb_sep',
'u_sb_sp',
'u_sb_sterm',
'u_sb_upper',
'u_lb_unknown',
'u_lb_ambiguous',
'u_lb_alphabetic',
'u_lb_break_both',
'u_lb_break_after',
'u_lb_break_before',
'u_lb_mandatory_break',
'u_lb_contingent_break',
'u_lb_close_punctuation',
'u_lb_combining_mark',
'u_lb_carriage_return',
'u_lb_exclamation',
'u_lb_glue',
'u_lb_hyphen',
'u_lb_ideographic',
'u_lb_inseparable',
'u_lb_infix_numeric',
'u_lb_line_feed',
'u_lb_nonstarter',
'u_lb_numeric',
'u_lb_open_punctuation',
'u_lb_postfix_numeric',
'u_lb_prefix_numeric',
'u_lb_quotation',
'u_lb_complex_context',
'u_lb_surrogate',
'u_lb_space',
'u_lb_break_symbols',
'u_lb_zwspace',
'u_lb_next_line',
'u_lb_word_joiner',
'u_lb_h2',
'u_lb_h3',
'u_lb_jl',
'u_lb_jt',
'u_lb_jv',
'u_nt_none',
'u_nt_decimal',
'u_nt_digit',
'u_nt_numeric',
'locale_english',
'locale_french',
'locale_german',
'locale_italian',
'locale_japanese',
'locale_korean',
'locale_chinese',
'locale_simplifiedchinese',
'locale_traditionalchinese',
'locale_france',
'locale_germany',
'locale_italy',
'locale_japan',
'locale_korea',
'locale_china',
'locale_prc',
'locale_taiwan',
'locale_uk',
'locale_us',
'locale_canada',
'locale_canadafrench',
'locale_default',
'locale_setdefault',
'locale_isocountries',
'locale_isolanguages',
'locale_availablelocales',
'ucal_listtimezones',
'ucal_era',
'ucal_year',
'ucal_month',
'ucal_weekofyear',
'ucal_weekofmonth',
'ucal_dayofmonth',
'ucal_dayofyear',
'ucal_dayofweek',
'ucal_dayofweekinmonth',
'ucal_ampm',
'ucal_hour',
'ucal_hourofday',
'ucal_minute',
'ucal_second',
'ucal_millisecond',
'ucal_zoneoffset',
'ucal_dstoffset',
'ucal_yearwoy',
'ucal_dowlocal',
'ucal_extendedyear',
'ucal_julianday',
'ucal_millisecondsinday',
'ucal_lenient',
'ucal_firstdayofweek',
'ucal_daysinfirstweek',
'sys_sigalrm',
'sys_sighup',
'sys_sigkill',
'sys_sigpipe',
'sys_sigquit',
'sys_sigusr1',
'sys_sigusr2',
'sys_sigchld',
'sys_sigcont',
'sys_sigstop',
'sys_sigtstp',
'sys_sigttin',
'sys_sigttou',
'sys_sigbus',
'sys_sigprof',
'sys_sigsys',
'sys_sigtrap',
'sys_sigurg',
'sys_sigvtalrm',
'sys_sigxcpu',
'sys_sigxfsz',
'sys_wcontinued',
'sys_wnohang',
'sys_wuntraced',
'sys_sigabrt',
'sys_sigfpe',
'sys_sigill',
'sys_sigint',
'sys_sigsegv',
'sys_sigterm',
'sys_exit',
'sys_fork',
'sys_kill',
'sys_waitpid',
'sys_getegid',
'sys_geteuid',
'sys_getgid',
'sys_getlogin',
'sys_getpid',
'sys_getppid',
'sys_getuid',
'sys_setuid',
'sys_setgid',
'sys_setsid',
'sys_errno',
'sys_strerror',
'sys_time',
'sys_difftime',
'sys_getpwuid',
'sys_getpwnam',
'sys_getgrnam',
'sys_drand48',
'sys_erand48',
'sys_jrand48',
'sys_lcong48',
'sys_lrand48',
'sys_mrand48',
'sys_nrand48',
'sys_srand48',
'sys_random',
'sys_srandom',
'sys_seed48',
'sys_rand',
'sys_srand',
'sys_environ',
'sys_getenv',
'sys_setenv',
'sys_unsetenv',
'sys_uname',
'uuid_compare',
'uuid_copy',
'uuid_generate',
'uuid_generate_random',
'uuid_generate_time',
'uuid_is_null',
'uuid_parse',
'uuid_unparse',
'uuid_unparse_lower',
'uuid_unparse_upper',
'sys_credits',
'sleep',
'sys_dll_ext',
'sys_listtypes',
'sys_listtraits',
'sys_listunboundmethods',
'sys_getthreadcount',
'sys_growheapby',
'sys_getheapsize',
'sys_getheapfreebytes',
'sys_getbytessincegc',
'sys_garbagecollect',
'sys_clock',
'sys_getstartclock',
'sys_clockspersec',
'sys_pointersize',
'sys_loadlibrary',
'sys_getchar',
'sys_chroot',
'sys_exec',
'sys_kill_exec',
'sys_wait_exec',
'sys_test_exec',
'sys_detach_exec',
'sys_pid_exec',
'wifexited',
'wexitstatus',
'wifsignaled',
'wtermsig',
'wifstopped',
'wstopsig',
'wifcontinued',
'sys_eol',
'sys_iswindows',
'sys_is_windows',
'sys_isfullpath',
'sys_is_full_path',
'lcapi_loadmodule',
'lcapi_listdatasources',
'encrypt_blowfish',
'decrypt_blowfish',
'cipher_digest',
'cipher_encrypt',
'cipher_decrypt',
'cipher_list',
'cipher_keylength',
'cipher_hmac',
'cipher_seal',
'cipher_open',
'cipher_sign',
'cipher_verify',
'cipher_decrypt_private',
'cipher_decrypt_public',
'cipher_encrypt_private',
'cipher_encrypt_public',
'cipher_generate_key',
'tag_exists',
'curl_easy_init',
'curl_easy_duphandle',
'curl_easy_cleanup',
'curl_easy_getinfo',
'curl_multi_perform',
'curl_multi_result',
'curl_easy_reset',
'curl_easy_setopt',
'curl_easy_strerror',
'curl_getdate',
'curl_version',
'curl_version_info',
'curlinfo_effective_url',
'curlinfo_content_type',
'curlinfo_response_code',
'curlinfo_header_size',
'curlinfo_request_size',
'curlinfo_ssl_verifyresult',
'curlinfo_filetime',
'curlinfo_redirect_count',
'curlinfo_http_connectcode',
'curlinfo_httpauth_avail',
'curlinfo_proxyauth_avail',
'curlinfo_os_errno',
'curlinfo_num_connects',
'curlinfo_total_time',
'curlinfo_namelookup_time',
'curlinfo_connect_time',
'curlinfo_pretransfer_time',
'curlinfo_size_upload',
'curlinfo_size_download',
'curlinfo_speed_download',
'curlinfo_speed_upload',
'curlinfo_content_length_download',
'curlinfo_content_length_upload',
'curlinfo_starttransfer_time',
'curlinfo_redirect_time',
'curlinfo_ssl_engines',
'curlopt_url',
'curlopt_postfields',
'curlopt_cainfo',
'curlopt_capath',
'curlopt_cookie',
'curlopt_cookiefile',
'curlopt_cookiejar',
'curlopt_customrequest',
'curlopt_egdsocket',
'curlopt_encoding',
'curlopt_ftp_account',
'curlopt_ftpport',
'curlopt_interface',
'curlopt_krb4level',
'curlopt_netrc_file',
'curlopt_proxy',
'curlopt_proxyuserpwd',
'curlopt_random_file',
'curlopt_range',
'curlopt_readdata',
'curlopt_referer',
'curlopt_ssl_cipher_list',
'curlopt_sslcert',
'curlopt_sslcerttype',
'curlopt_sslengine',
'curlopt_sslkey',
'curlopt_sslkeypasswd',
'curlopt_sslkeytype',
'curlopt_useragent',
'curlopt_userpwd',
'curlopt_postfieldsize',
'curlopt_autoreferer',
'curlopt_buffersize',
'curlopt_connecttimeout',
'curlopt_cookiesession',
'curlopt_crlf',
'curlopt_dns_use_global_cache',
'curlopt_failonerror',
'curlopt_filetime',
'curlopt_followlocation',
'curlopt_forbid_reuse',
'curlopt_fresh_connect',
'curlopt_ftp_create_missing_dirs',
'curlopt_ftp_response_timeout',
'curlopt_ftp_ssl',
'curlopt_use_ssl',
'curlopt_ftp_use_eprt',
'curlopt_ftp_use_epsv',
'curlopt_ftpappend',
'curlopt_ftplistonly',
'curlopt_ftpsslauth',
'curlopt_header',
'curlopt_http_version',
'curlopt_httpauth',
'curlopt_httpget',
'curlopt_httpproxytunnel',
'curlopt_infilesize',
'curlopt_ipresolve',
'curlopt_low_speed_limit',
'curlopt_low_speed_time',
'curlopt_maxconnects',
'curlopt_maxfilesize',
'curlopt_maxredirs',
'curlopt_netrc',
'curlopt_nobody',
'curlopt_noprogress',
'curlopt_port',
'curlopt_post',
'curlopt_proxyauth',
'curlopt_proxyport',
'curlopt_proxytype',
'curlopt_put',
'curlopt_resume_from',
'curlopt_ssl_verifyhost',
'curlopt_ssl_verifypeer',
'curlopt_sslengine_default',
'curlopt_sslversion',
'curlopt_tcp_nodelay',
'curlopt_timecondition',
'curlopt_timeout',
'curlopt_timevalue',
'curlopt_transfertext',
'curlopt_unrestricted_auth',
'curlopt_upload',
'curlopt_verbose',
'curlopt_infilesize_large',
'curlopt_maxfilesize_large',
'curlopt_postfieldsize_large',
'curlopt_resume_from_large',
'curlopt_http200aliases',
'curlopt_httpheader',
'curlopt_postquote',
'curlopt_prequote',
'curlopt_quote',
'curlopt_httppost',
'curlopt_writedata',
'curl_version_ipv6',
'curl_version_kerberos4',
'curl_version_ssl',
'curl_version_libz',
'curl_version_ntlm',
'curl_version_gssnegotiate',
'curl_version_debug',
'curl_version_asynchdns',
'curl_version_spnego',
'curl_version_largefile',
'curl_version_idn',
'curl_netrc_ignored',
'curl_netrc_optional',
'curl_netrc_required',
'curl_http_version_none',
'curl_http_version_1_0',
'curl_http_version_1_1',
'curl_ipresolve_whatever',
'curl_ipresolve_v4',
'curl_ipresolve_v6',
'curlftpssl_none',
'curlftpssl_try',
'curlftpssl_control',
'curlftpssl_all',
'curlftpssl_last',
'curlftpauth_default',
'curlftpauth_ssl',
'curlftpauth_tls',
'curlauth_none',
'curlauth_basic',
'curlauth_digest',
'curlauth_gssnegotiate',
'curlauth_ntlm',
'curlauth_any',
'curlauth_anysafe',
'curlproxy_http',
'curlproxy_socks4',
'curlproxy_socks5',
'curle_ok',
'curle_unsupported_protocol',
'curle_failed_init',
'curle_url_malformat',
'curle_url_malformat_user',
'curle_couldnt_resolve_proxy',
'curle_couldnt_resolve_host',
'curle_couldnt_connect',
'curle_ftp_weird_server_reply',
'curle_ftp_access_denied',
'curle_ftp_user_password_incorrect',
'curle_ftp_weird_pass_reply',
'curle_ftp_weird_user_reply',
'curle_ftp_weird_pasv_reply',
'curle_ftp_weird_227_format',
'curle_ftp_cant_get_host',
'curle_ftp_cant_reconnect',
'curle_ftp_couldnt_set_binary',
'curle_partial_file',
'curle_ftp_couldnt_retr_file',
'curle_ftp_write_error',
'curle_ftp_quote_error',
'curle_http_returned_error',
'curle_write_error',
'curle_malformat_user',
'curle_read_error',
'curle_out_of_memory',
'curle_operation_timeouted',
'curle_ftp_couldnt_set_ascii',
'curle_ftp_port_failed',
'curle_ftp_couldnt_use_rest',
'curle_ftp_couldnt_get_size',
'curle_http_range_error',
'curle_http_post_error',
'curle_ssl_connect_error',
'curle_bad_download_resume',
'curle_file_couldnt_read_file',
'curle_ldap_cannot_bind',
'curle_ldap_search_failed',
'curle_library_not_found',
'curle_function_not_found',
'curle_aborted_by_callback',
'curle_bad_function_argument',
'curle_bad_calling_order',
'curle_interface_failed',
'curle_bad_password_entered',
'curle_too_many_redirects',
'curle_unknown_telnet_option',
'curle_telnet_option_syntax',
'curle_obsolete',
'curle_ssl_peer_certificate',
'curle_got_nothing',
'curle_ssl_engine_notfound',
'curle_ssl_engine_setfailed',
'curle_send_error',
'curle_recv_error',
'curle_share_in_use',
'curle_ssl_certproblem',
'curle_ssl_cipher',
'curle_ssl_cacert',
'curle_bad_content_encoding',
'curle_ldap_invalid_url',
'curle_filesize_exceeded',
'curle_ftp_ssl_failed',
'curle_send_fail_rewind',
'curle_ssl_engine_initfailed',
'curle_login_denied',
'curlmsg_done',
'zip_open',
'zip_name_locate',
'zip_fopen',
'zip_fopen_index',
'zip_fread',
'zip_fclose',
'zip_close',
'zip_stat',
'zip_stat_index',
'zip_get_archive_comment',
'zip_get_file_comment',
'zip_get_name',
'zip_get_num_files',
'zip_add',
'zip_replace',
'zip_add_dir',
'zip_set_file_comment',
'zip_rename',
'zip_delete',
'zip_unchange',
'zip_unchange_all',
'zip_unchange_archive',
'zip_set_archive_comment',
'zip_error_to_str',
'zip_file_strerror',
'zip_strerror',
'zip_error_get',
'zip_file_error_get',
'zip_error_get_sys_type',
'zlib_version',
'fastcgi_initiate_request',
'debugging_enabled',
'debugging_stop',
'evdns_resolve_ipv4',
'evdns_resolve_ipv6',
'evdns_resolve_reverse',
'evdns_resolve_reverse_ipv6',
'stdout',
'stdoutnl',
'fail',
'fail_if',
'fail_ifnot',
'error_code',
'error_msg',
'error_obj',
'error_stack',
'error_push',
'error_pop',
'error_reset',
'error_msg_invalidparameter',
'error_code_invalidparameter',
'error_msg_networkerror',
'error_code_networkerror',
'error_msg_runtimeassertion',
'error_code_runtimeassertion',
'error_msg_methodnotfound',
'error_code_methodnotfound',
'error_msg_resnotfound',
'error_code_resnotfound',
'error_msg_filenotfound',
'error_code_filenotfound',
'error_msg_aborted',
'error_code_aborted',
'error_msg_dividebyzero',
'error_code_dividebyzero',
'error_msg_noerror',
'error_code_noerror',
'abort',
'protect',
'generateforeach',
'method_name',
'queriable_do',
'queriable_sum',
'queriable_average',
'queriable_min',
'queriable_max',
'queriable_internal_combinebindings',
'queriable_defaultcompare',
'queriable_reversecompare',
'queriable_qsort',
'timer',
'thread_var_push',
'thread_var_pop',
'thread_var_get',
'loop_value',
'loop_value_push',
'loop_value_pop',
'loop_key',
'loop_key_push',
'loop_key_pop',
'loop_push',
'loop_pop',
'loop_count',
'loop_continue',
'loop_abort',
'loop',
'sys_while',
'sys_iterate',
'string_validcharset',
'eol',
'encoding_utf8',
'encoding_iso88591',
'integer_random',
'integer_bitor',
'millis',
'micros',
'max',
'min',
'range',
'median',
'decimal_random',
'pi',
'lcapi_datasourceinit',
'lcapi_datasourceterm',
'lcapi_datasourcenames',
'lcapi_datasourcetablenames',
'lcapi_datasourcesearch',
'lcapi_datasourceadd',
'lcapi_datasourceupdate',
'lcapi_datasourcedelete',
'lcapi_datasourceinfo',
'lcapi_datasourceexecsql',
'lcapi_datasourcerandom',
'lcapi_datasourceschemanames',
'lcapi_datasourcecloseconnection',
'lcapi_datasourcetickle',
'lcapi_datasourceduplicate',
'lcapi_datasourcescripts',
'lcapi_datasourceimage',
'lcapi_datasourcefindall',
'lcapi_datasourcematchesname',
'lcapi_datasourcepreparesql',
'lcapi_datasourceunpreparesql',
'lcapi_datasourcenothing',
'lcapi_fourchartointeger',
'lcapi_datasourcetypestring',
'lcapi_datasourcetypeinteger',
'lcapi_datasourcetypeboolean',
'lcapi_datasourcetypeblob',
'lcapi_datasourcetypedecimal',
'lcapi_datasourcetypedate',
'lcapi_datasourceprotectionnone',
'lcapi_datasourceprotectionreadonly',
'lcapi_datasourceopgt',
'lcapi_datasourceopgteq',
'lcapi_datasourceopeq',
'lcapi_datasourceopneq',
'lcapi_datasourceoplt',
'lcapi_datasourceoplteq',
'lcapi_datasourceopbw',
'lcapi_datasourceopew',
'lcapi_datasourceopct',
'lcapi_datasourceopnct',
'lcapi_datasourceopnbw',
'lcapi_datasourceopnew',
'lcapi_datasourceopand',
'lcapi_datasourceopor',
'lcapi_datasourceopnot',
'lcapi_datasourceopno',
'lcapi_datasourceopany',
'lcapi_datasourceopin',
'lcapi_datasourceopnin',
'lcapi_datasourceopft',
'lcapi_datasourceoprx',
'lcapi_datasourceopnrx',
'lcapi_datasourcesortascending',
'lcapi_datasourcesortdescending',
'lcapi_datasourcesortcustom',
'lcapi_loadmodules',
'lasso_version',
'lasso_uniqueid',
'usage',
'file_defaultencoding',
'file_copybuffersize',
'file_modeline',
'file_modechar',
'file_forceroot',
'file_tempfile',
'file_stdin',
'file_stdout',
'file_stderr',
'lasso_tagexists',
'lasso_methodexists',
'output',
'if_empty',
'if_null',
'if_true',
'if_false',
'process',
'treemap',
'locale_format',
'compress',
'uncompress',
'decompress',
'tag_name',
'series',
'nslookup',
'all',
'bw',
'cn',
'eq',
'ew',
'ft',
'gt',
'gte',
'lt',
'lte',
'neq',
'nrx',
'rx',
'none',
'minimal',
'full',
'output_none',
'lasso_executiontimelimit',
'namespace_global',
'namespace_using',
'namespace_import',
'site_id',
'site_name',
'sys_homepath',
'sys_masterhomepath',
'sys_supportpath',
'sys_librariespath',
'sys_databasespath',
'sys_usercapimodulepath',
'sys_appspath',
'sys_userstartuppath',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'mysqlds',
'odbc',
'sqliteconnector',
'sqlite_createdb',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'java_jvm_getenv',
'java_jvm_create',
'java_jdbc_load',
'database_database',
'database_table_datasources',
'database_table_datasource_hosts',
'database_table_datasource_databases',
'database_table_database_tables',
'database_table_table_fields',
'database_qs',
'database_initialize',
'database_util_cleanpath',
'database_adddefaultsqlitehost',
'sqlite_ok',
'sqlite_error',
'sqlite_internal',
'sqlite_perm',
'sqlite_abort',
'sqlite_busy',
'sqlite_locked',
'sqlite_nomem',
'sqlite_readonly',
'sqlite_interrupt',
'sqlite_ioerr',
'sqlite_corrupt',
'sqlite_notfound',
'sqlite_full',
'sqlite_cantopen',
'sqlite_protocol',
'sqlite_empty',
'sqlite_schema',
'sqlite_toobig',
'sqlite_constraint',
'sqlite_mismatch',
'sqlite_misuse',
'sqlite_nolfs',
'sqlite_auth',
'sqlite_format',
'sqlite_range',
'sqlite_notadb',
'sqlite_row',
'sqlite_done',
'sqlite_integer',
'sqlite_float',
'sqlite_blob',
'sqlite_null',
'sqlite_text',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'include_url',
'ftp_getdata',
'ftp_getfile',
'ftp_getlisting',
'ftp_putdata',
'ftp_putfile',
'ftp_deletefile',
'debugging_step_in',
'debugging_get_stack',
'debugging_get_context',
'debugging_detach',
'debugging_step_over',
'debugging_step_out',
'debugging_run',
'debugging_break',
'debugging_breakpoint_set',
'debugging_breakpoint_get',
'debugging_breakpoint_remove',
'debugging_breakpoint_list',
'debugging_breakpoint_update',
'debugging_terminate',
'debugging_context_locals',
'debugging_context_vars',
'debugging_context_self',
'dbgp_stop_stack_name',
'encrypt_md5',
'inline_columninfo_pos',
'inline_resultrows_pos',
'inline_foundcount_pos',
'inline_colinfo_name_pos',
'inline_colinfo_valuelist_pos',
'inline_scopeget',
'inline_scopepush',
'inline_scopepop',
'inline_namedget',
'inline_namedput',
'inline',
'resultset_count',
'resultset',
'resultsets',
'rows',
'rows_impl',
'records',
'column',
'field',
'column_names',
'field_names',
'column_name',
'field_name',
'found_count',
'shown_count',
'shown_first',
'shown_last',
'action_statement',
'lasso_currentaction',
'maxrecords_value',
'skiprecords_value',
'action_param',
'action_params',
'admin_authorization',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'database_name',
'table_name',
'layout_name',
'schema_name',
'keycolumn_name',
'keyfield_name',
'keycolumn_value',
'keyfield_value',
'inline_colinfo_type_pos',
'column_type',
'rows_array',
'records_array',
'records_map',
'json_serialize',
'json_consume_string',
'json_consume_token',
'json_consume_array',
'json_consume_object',
'json_deserialize',
'json_rpccall',
'ljapi_initialize',
'locale_format_style_full',
'locale_format_style_long',
'locale_format_style_medium',
'locale_format_style_short',
'locale_format_style_default',
'locale_format_style_none',
'locale_format_style_date_time',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'admin_initialize',
'admin_getpref',
'admin_setpref',
'admin_removepref',
'admin_userexists',
'admin_lassoservicepath',
'pdf_package',
'pdf_rectangle',
'pdf_serve',
'random_seed',
'xml',
'xml_transform',
'zip_create',
'zip_excl',
'zip_checkcons',
'zip_fl_nocase',
'zip_fl_nodir',
'zip_fl_compressed',
'zip_fl_unchanged',
'zip_er_ok',
'zip_er_multidisk',
'zip_er_rename',
'zip_er_close',
'zip_er_seek',
'zip_er_read',
'zip_er_write',
'zip_er_crc',
'zip_er_zipclosed',
'zip_er_noent',
'zip_er_exists',
'zip_er_open',
'zip_er_tmpopen',
'zip_er_zlib',
'zip_er_memory',
'zip_er_changed',
'zip_er_compnotsupp',
'zip_er_eof',
'zip_er_inval',
'zip_er_nozip',
'zip_er_internal',
'zip_er_incons',
'zip_er_remove',
'zip_er_deleted',
'zip_et_none',
'zip_et_sys',
'zip_et_zlib',
'zip_cm_default',
'zip_cm_store',
'zip_cm_shrink',
'zip_cm_reduce_1',
'zip_cm_reduce_2',
'zip_cm_reduce_3',
'zip_cm_reduce_4',
'zip_cm_implode',
'zip_cm_deflate',
'zip_cm_deflate64',
'zip_cm_pkware_implode',
'zip_cm_bzip2',
'zip_em_none',
'zip_em_trad_pkware',
'zip_em_des',
'zip_em_rc2_old',
'zip_em_3des_168',
'zip_em_3des_112',
'zip_em_aes_128',
'zip_em_aes_192',
'zip_em_aes_256',
'zip_em_rc2',
'zip_em_rc4',
'zip_em_unknown',
'dns_lookup',
'dns_default',
'string_charfromname',
'string_concatenate',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_lowercase',
'document',
'email_attachment_mime_type',
'email_translatebreakstocrlf',
'email_findemails',
'email_fix_address',
'email_fix_address_list',
'encode_qheader',
'email_send',
'email_queue',
'email_immediate',
'email_result',
'email_status',
'email_token',
'email_merge',
'email_batch',
'email_safeemail',
'email_extract',
'email_pop_priv_substring',
'email_pop_priv_extract',
'email_digestchallenge',
'email_pop_priv_quote',
'email_digestresponse',
'encrypt_hmac',
'encrypt_crammd5',
'email_fs_error_clean',
'email_initialize',
'email_mxlookup',
'lasso_errorreporting',
'fcgi_version_1',
'fcgi_null_request_id',
'fcgi_begin_request',
'fcgi_abort_request',
'fcgi_end_request',
'fcgi_params',
'fcgi_stdin',
'fcgi_stdout',
'fcgi_stderr',
'fcgi_data',
'fcgi_get_values',
'fcgi_get_values_result',
'fcgi_unknown_type',
'fcgi_keep_conn',
'fcgi_responder',
'fcgi_authorize',
'fcgi_filter',
'fcgi_request_complete',
'fcgi_cant_mpx_conn',
'fcgi_overloaded',
'fcgi_unknown_role',
'fcgi_max_conns',
'fcgi_max_reqs',
'fcgi_mpxs_conns',
'fcgi_read_timeout_seconds',
'fcgi_makeendrequestbody',
'fcgi_bodychunksize',
'fcgi_makestdoutbody',
'fcgi_readparam',
'web_request',
'include_cache_compare',
'fastcgi_initialize',
'fastcgi_handlecon',
'fastcgi_handlereq',
'fastcgi_createfcgirequest',
'web_handlefcgirequest',
'filemakerds_initialize',
'filemakerds',
'value_listitem',
'valuelistitem',
'selected',
'checked',
'value_list',
'http_char_space',
'http_char_htab',
'http_char_cr',
'http_char_lf',
'http_char_question',
'http_char_colon',
'http_read_timeout_secs',
'http_default_files',
'http_server_apps_path',
'jdbc_initialize',
'lassoapp_settingsdb',
'lassoapp_format_mod_date',
'lassoapp_include_current',
'lassoapp_include',
'lassoapp_find_missing_file',
'lassoapp_get_capabilities_name',
'lassoapp_exists',
'lassoapp_path_to_method_name',
'lassoapp_invoke_resource',
'lassoapp_initialize_db',
'lassoapp_initialize',
'lassoapp_issourcefileextension',
'lassoapp_current_include',
'lassoapp_current_app',
'lassoapp_do_with_include',
'lassoapp_link',
'lassoapp_load_module',
'lassoapp_mime_type_html',
'lassoapp_mime_type_lasso',
'lassoapp_mime_type_xml',
'lassoapp_mime_type_ppt',
'lassoapp_mime_type_js',
'lassoapp_mime_type_txt',
'lassoapp_mime_type_jpg',
'lassoapp_mime_type_png',
'lassoapp_mime_type_gif',
'lassoapp_mime_type_css',
'lassoapp_mime_type_csv',
'lassoapp_mime_type_tif',
'lassoapp_mime_type_ico',
'lassoapp_mime_type_rss',
'lassoapp_mime_type_xhr',
'lassoapp_mime_type_pdf',
'lassoapp_mime_type_docx',
'lassoapp_mime_type_doc',
'lassoapp_mime_type_zip',
'lassoapp_mime_type_svg',
'lassoapp_mime_type_ttf',
'lassoapp_mime_type_woff',
'lassoapp_mime_type_swf',
'lassoapp_mime_get',
'log_level_critical',
'log_level_warning',
'log_level_detail',
'log_level_sql',
'log_level_deprecated',
'log_destination_console',
'log_destination_file',
'log_destination_database',
'log',
'log_setdestination',
'log_always',
'log_critical',
'log_warning',
'log_detail',
'log_sql',
'log_deprecated',
'log_max_file_size',
'log_trim_file_size',
'log_initialize',
'portal',
'security_database',
'security_table_groups',
'security_table_users',
'security_table_ug_map',
'security_default_realm',
'security_initialize',
'session_initialize',
'session_getdefaultdriver',
'session_setdefaultdriver',
'session_start',
'session_addvar',
'session_removevar',
'session_end',
'session_id',
'session_abort',
'session_result',
'session_deleteexpired',
'odbc_session_driver_mssql',
'session_decorate',
'auth_admin',
'auth_check',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'client_addr',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_getparam',
'client_headers',
'client_integertoip',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_postparam',
'client_type',
'client_username',
'client_url',
'referer_url',
'referrer_url',
'content_type',
'content_encoding',
'cookie',
'cookie_set',
'include',
'include_currentpath',
'include_filepath',
'include_localpath',
'include_once',
'include_path',
'include_raw',
'includes',
'library',
'library_once',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'response_root',
'redirect_url',
'server_admin',
'server_name',
'server_ip',
'server_port',
'server_protocol',
'server_signature',
'server_software',
'server_push',
'token_value',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxhorzpixels',
'wap_maxvertpixels',
'wap_maxcolumns',
'wap_maxrows',
'define_atbegin',
'define_atend',
'content_header',
'content_addheader',
'content_replaceheader',
'content_body',
'html_comment',
'web_node_forpath',
'web_nodes_requesthandler',
'web_nodes_normalizeextension',
'web_nodes_processcontentnode',
'web_nodes_initialize',
'web_node_content_representation_xhr',
'web_node_content_representation_html',
'web_node_content_representation_css',
'web_node_content_representation_js',
'web_response_nodesentry',
'web_response',
'web_router_database',
'web_router_initialize'
],
'Lasso 8 Tags': [
'__char',
'__sync_timestamp__',
'_admin_addgroup',
'_admin_adduser',
'_admin_defaultconnector',
'_admin_defaultconnectornames',
'_admin_defaultdatabase',
'_admin_defaultfield',
'_admin_defaultgroup',
'_admin_defaulthost',
'_admin_defaulttable',
'_admin_defaultuser',
'_admin_deleteconnector',
'_admin_deletedatabase',
'_admin_deletefield',
'_admin_deletegroup',
'_admin_deletehost',
'_admin_deletetable',
'_admin_deleteuser',
'_admin_duplicategroup',
'_admin_internaldatabase',
'_admin_listconnectors',
'_admin_listdatabases',
'_admin_listfields',
'_admin_listgroups',
'_admin_listhosts',
'_admin_listtables',
'_admin_listusers',
'_admin_refreshconnector',
'_admin_refreshsecurity',
'_admin_servicepath',
'_admin_updateconnector',
'_admin_updatedatabase',
'_admin_updatefield',
'_admin_updategroup',
'_admin_updatehost',
'_admin_updatetable',
'_admin_updateuser',
'_chartfx_activation_string',
'_chartfx_getchallengestring',
'_chop_args',
'_chop_mimes',
'_client_addr_old',
'_client_address_old',
'_client_ip_old',
'_database_names',
'_datasource_reload',
'_date_current',
'_date_format',
'_date_msec',
'_date_parse',
'_execution_timelimit',
'_file_chmod',
'_initialize',
'_jdbc_acceptsurl',
'_jdbc_debug',
'_jdbc_deletehost',
'_jdbc_driverclasses',
'_jdbc_driverinfo',
'_jdbc_metainfo',
'_jdbc_propertyinfo',
'_jdbc_setdriver',
'_lasso_param',
'_log_helper',
'_proc_noparam',
'_proc_withparam',
'_recursion_limit',
'_request_param',
'_security_binaryexpiration',
'_security_flushcaches',
'_security_isserialized',
'_security_serialexpiration',
'_srand',
'_strict_literals',
'_substring',
'_xmlrpc_exconverter',
'_xmlrpc_inconverter',
'_xmlrpc_xmlinconverter',
'abort',
'action_addinfo',
'action_addrecord',
'action_param',
'action_params',
'action_setfoundcount',
'action_setrecordid',
'action_settotalcount',
'action_statement',
'admin_allowedfileroots',
'admin_changeuser',
'admin_createuser',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_groupassignuser',
'admin_grouplistusers',
'admin_groupremoveuser',
'admin_lassoservicepath',
'admin_listgroups',
'admin_refreshlicensing',
'admin_refreshsecurity',
'admin_reloaddatasource',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'admin_userlistgroups',
'all',
'and',
'array',
'array_iterator',
'auth',
'auth_admin',
'auth_auth',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'base64',
'bean',
'bigint',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'boolean',
'bw',
'bytes',
'cache',
'cache_delete',
'cache_empty',
'cache_exists',
'cache_fetch',
'cache_internal',
'cache_maintenance',
'cache_object',
'cache_preferences',
'cache_store',
'case',
'chartfx',
'chartfx_records',
'chartfx_serve',
'checked',
'choice_list',
'choice_listitem',
'choicelistitem',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'click_text',
'client_addr',
'client_address',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_headers',
'client_ip',
'client_ipfrominteger',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column',
'column_name',
'column_names',
'compare_beginswith',
'compare_contains',
'compare_endswith',
'compare_equalto',
'compare_greaterthan',
'compare_greaterthanorequals',
'compare_greaterthanorequls',
'compare_lessthan',
'compare_lessthanorequals',
'compare_notbeginswith',
'compare_notcontains',
'compare_notendswith',
'compare_notequalto',
'compare_notregexp',
'compare_regexp',
'compare_strictequalto',
'compare_strictnotequalto',
'compiler_removecacheddoc',
'compiler_setdefaultparserflags',
'compress',
'content_body',
'content_encoding',
'content_header',
'content_type',
'cookie',
'cookie_set',
'curl_ftp_getfile',
'curl_ftp_getlisting',
'curl_ftp_putfile',
'curl_include_url',
'currency',
'database_changecolumn',
'database_changefield',
'database_createcolumn',
'database_createfield',
'database_createtable',
'database_fmcontainer',
'database_hostinfo',
'database_inline',
'database_name',
'database_nameitem',
'database_names',
'database_realname',
'database_removecolumn',
'database_removefield',
'database_removetable',
'database_repeating',
'database_repeating_valueitem',
'database_repeatingvalueitem',
'database_schemanameitem',
'database_schemanames',
'database_tablecolumn',
'database_tablenameitem',
'database_tablenames',
'datasource_name',
'datasource_register',
'date',
'date__date_current',
'date__date_format',
'date__date_msec',
'date__date_parse',
'date_add',
'date_date',
'date_difference',
'date_duration',
'date_format',
'date_getcurrentdate',
'date_getday',
'date_getdayofweek',
'date_gethour',
'date_getlocaltimezone',
'date_getminute',
'date_getmonth',
'date_getsecond',
'date_gettime',
'date_getyear',
'date_gmttolocal',
'date_localtogmt',
'date_maximum',
'date_minimum',
'date_msec',
'date_setformat',
'date_subtract',
'db_layoutnameitem',
'db_layoutnames',
'db_nameitem',
'db_names',
'db_tablenameitem',
'db_tablenames',
'dbi_column_names',
'dbi_field_names',
'decimal',
'decimal_setglobaldefaultprecision',
'decode_base64',
'decode_bheader',
'decode_hex',
'decode_html',
'decode_json',
'decode_qheader',
'decode_quotedprintable',
'decode_quotedprintablebytes',
'decode_url',
'decode_xml',
'decompress',
'decrypt_blowfish',
'decrypt_blowfish2',
'default',
'define_atbegin',
'define_atend',
'define_constant',
'define_prototype',
'define_tag',
'define_tagp',
'define_type',
'define_typep',
'deserialize',
'directory_directorynameitem',
'directory_lister',
'directory_nameitem',
'directorynameitem',
'dns_default',
'dns_lookup',
'dns_response',
'duration',
'else',
'email_batch',
'email_compose',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_immediate',
'email_merge',
'email_mxerror',
'email_mxlookup',
'email_parse',
'email_pop',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_smtp',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_base64',
'encode_bheader',
'encode_break',
'encode_breaks',
'encode_crc32',
'encode_hex',
'encode_html',
'encode_htmltoxml',
'encode_json',
'encode_qheader',
'encode_quotedprintable',
'encode_quotedprintablebytes',
'encode_set',
'encode_smart',
'encode_sql',
'encode_sql92',
'encode_stricturl',
'encode_url',
'encode_xml',
'encrypt_blowfish',
'encrypt_blowfish2',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eq',
'error_adderror',
'error_code',
'error_code_aborted',
'error_code_assert',
'error_code_bof',
'error_code_connectioninvalid',
'error_code_couldnotclosefile',
'error_code_couldnotcreateoropenfile',
'error_code_couldnotdeletefile',
'error_code_couldnotdisposememory',
'error_code_couldnotlockmemory',
'error_code_couldnotreadfromfile',
'error_code_couldnotunlockmemory',
'error_code_couldnotwritetofile',
'error_code_criterianotmet',
'error_code_datasourceerror',
'error_code_directoryfull',
'error_code_diskfull',
'error_code_dividebyzero',
'error_code_eof',
'error_code_failure',
'error_code_fieldrestriction',
'error_code_file',
'error_code_filealreadyexists',
'error_code_filecorrupt',
'error_code_fileinvalid',
'error_code_fileinvalidaccessmode',
'error_code_fileisclosed',
'error_code_fileisopen',
'error_code_filelocked',
'error_code_filenotfound',
'error_code_fileunlocked',
'error_code_httpfilenotfound',
'error_code_illegalinstruction',
'error_code_illegaluseoffrozeninstance',
'error_code_invaliddatabase',
'error_code_invalidfilename',
'error_code_invalidmemoryobject',
'error_code_invalidparameter',
'error_code_invalidpassword',
'error_code_invalidpathname',
'error_code_invalidusername',
'error_code_ioerror',
'error_code_loopaborted',
'error_code_memory',
'error_code_network',
'error_code_nilpointer',
'error_code_noerr',
'error_code_nopermission',
'error_code_outofmemory',
'error_code_outofstackspace',
'error_code_overflow',
'error_code_postconditionfailed',
'error_code_preconditionfailed',
'error_code_resnotfound',
'error_code_resource',
'error_code_streamreaderror',
'error_code_streamwriteerror',
'error_code_syntaxerror',
'error_code_tagnotfound',
'error_code_unknownerror',
'error_code_varnotfound',
'error_code_volumedoesnotexist',
'error_code_webactionnotsupported',
'error_code_webadderror',
'error_code_webdeleteerror',
'error_code_webmodulenotfound',
'error_code_webnosuchobject',
'error_code_webrepeatingrelatedfield',
'error_code_webrequiredfieldmissing',
'error_code_webtimeout',
'error_code_webupdateerror',
'error_columnrestriction',
'error_currenterror',
'error_databaseconnectionunavailable',
'error_databasetimeout',
'error_deleteerror',
'error_fieldrestriction',
'error_filenotfound',
'error_invaliddatabase',
'error_invalidpassword',
'error_invalidusername',
'error_modulenotfound',
'error_msg',
'error_msg_aborted',
'error_msg_assert',
'error_msg_bof',
'error_msg_connectioninvalid',
'error_msg_couldnotclosefile',
'error_msg_couldnotcreateoropenfile',
'error_msg_couldnotdeletefile',
'error_msg_couldnotdisposememory',
'error_msg_couldnotlockmemory',
'error_msg_couldnotreadfromfile',
'error_msg_couldnotunlockmemory',
'error_msg_couldnotwritetofile',
'error_msg_criterianotmet',
'error_msg_datasourceerror',
'error_msg_directoryfull',
'error_msg_diskfull',
'error_msg_dividebyzero',
'error_msg_eof',
'error_msg_failure',
'error_msg_fieldrestriction',
'error_msg_file',
'error_msg_filealreadyexists',
'error_msg_filecorrupt',
'error_msg_fileinvalid',
'error_msg_fileinvalidaccessmode',
'error_msg_fileisclosed',
'error_msg_fileisopen',
'error_msg_filelocked',
'error_msg_filenotfound',
'error_msg_fileunlocked',
'error_msg_httpfilenotfound',
'error_msg_illegalinstruction',
'error_msg_illegaluseoffrozeninstance',
'error_msg_invaliddatabase',
'error_msg_invalidfilename',
'error_msg_invalidmemoryobject',
'error_msg_invalidparameter',
'error_msg_invalidpassword',
'error_msg_invalidpathname',
'error_msg_invalidusername',
'error_msg_ioerror',
'error_msg_loopaborted',
'error_msg_memory',
'error_msg_network',
'error_msg_nilpointer',
'error_msg_noerr',
'error_msg_nopermission',
'error_msg_outofmemory',
'error_msg_outofstackspace',
'error_msg_overflow',
'error_msg_postconditionfailed',
'error_msg_preconditionfailed',
'error_msg_resnotfound',
'error_msg_resource',
'error_msg_streamreaderror',
'error_msg_streamwriteerror',
'error_msg_syntaxerror',
'error_msg_tagnotfound',
'error_msg_unknownerror',
'error_msg_varnotfound',
'error_msg_volumedoesnotexist',
'error_msg_webactionnotsupported',
'error_msg_webadderror',
'error_msg_webdeleteerror',
'error_msg_webmodulenotfound',
'error_msg_webnosuchobject',
'error_msg_webrepeatingrelatedfield',
'error_msg_webrequiredfieldmissing',
'error_msg_webtimeout',
'error_msg_webupdateerror',
'error_noerror',
'error_nopermission',
'error_norecordsfound',
'error_outofmemory',
'error_pop',
'error_push',
'error_reqcolumnmissing',
'error_reqfieldmissing',
'error_requiredcolumnmissing',
'error_requiredfieldmissing',
'error_reset',
'error_seterrorcode',
'error_seterrormessage',
'error_updateerror',
'euro',
'event_schedule',
'ew',
'fail',
'fail_if',
'false',
'field',
'field_name',
'field_names',
'file',
'file_autoresolvefullpaths',
'file_chmod',
'file_control',
'file_copy',
'file_create',
'file_creationdate',
'file_currenterror',
'file_delete',
'file_exists',
'file_getlinecount',
'file_getsize',
'file_isdirectory',
'file_listdirectory',
'file_moddate',
'file_modechar',
'file_modeline',
'file_move',
'file_openread',
'file_openreadwrite',
'file_openwrite',
'file_openwriteappend',
'file_openwritetruncate',
'file_probeeol',
'file_processuploads',
'file_read',
'file_readline',
'file_rename',
'file_serve',
'file_setsize',
'file_stream',
'file_streamcopy',
'file_uploads',
'file_waitread',
'file_waittimeout',
'file_waitwrite',
'file_write',
'find_soap_ops',
'form_param',
'found_count',
'ft',
'ftp_getfile',
'ftp_getlisting',
'ftp_putfile',
'full',
'global',
'global_defined',
'global_remove',
'global_reset',
'globals',
'gt',
'gte',
'handle',
'handle_error',
'header',
'html_comment',
'http_getfile',
'ical_alarm',
'ical_attribute',
'ical_calendar',
'ical_daylight',
'ical_event',
'ical_freebusy',
'ical_item',
'ical_journal',
'ical_parse',
'ical_standard',
'ical_timezone',
'ical_todo',
'if',
'if_empty',
'if_false',
'if_null',
'if_true',
'image',
'image_url',
'img',
'include',
'include_cgi',
'include_currentpath',
'include_once',
'include_raw',
'include_url',
'inline',
'integer',
'iterate',
'iterator',
'java',
'java_bean',
'json_records',
'json_rpccall',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_comment',
'lasso_currentaction',
'lasso_datasourceis',
'lasso_datasourceis4d',
'lasso_datasourceisfilemaker',
'lasso_datasourceisfilemaker7',
'lasso_datasourceisfilemaker9',
'lasso_datasourceisfilemakersa',
'lasso_datasourceisjdbc',
'lasso_datasourceislassomysql',
'lasso_datasourceismysql',
'lasso_datasourceisodbc',
'lasso_datasourceisopenbase',
'lasso_datasourceisoracle',
'lasso_datasourceispostgresql',
'lasso_datasourceisspotlight',
'lasso_datasourceissqlite',
'lasso_datasourceissqlserver',
'lasso_datasourcemodulename',
'lasso_datatype',
'lasso_disableondemand',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_parser',
'lasso_process',
'lasso_sessionid',
'lasso_siteid',
'lasso_siteisrunning',
'lasso_sitename',
'lasso_siterestart',
'lasso_sitestart',
'lasso_sitestop',
'lasso_tagexists',
'lasso_tagmodulename',
'lasso_uniqueid',
'lasso_updatecheck',
'lasso_uptime',
'lasso_version',
'lassoapp_create',
'lassoapp_dump',
'lassoapp_flattendir',
'lassoapp_getappdata',
'lassoapp_link',
'lassoapp_list',
'lassoapp_process',
'lassoapp_unitize',
'layout_name',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'ldml',
'ldml_ldml',
'library',
'library_once',
'link',
'link_currentaction',
'link_currentactionparams',
'link_currentactionurl',
'link_currentgroup',
'link_currentgroupparams',
'link_currentgroupurl',
'link_currentrecord',
'link_currentrecordparams',
'link_currentrecordurl',
'link_currentsearch',
'link_currentsearchparams',
'link_currentsearchurl',
'link_detail',
'link_detailparams',
'link_detailurl',
'link_firstgroup',
'link_firstgroupparams',
'link_firstgroupurl',
'link_firstrecord',
'link_firstrecordparams',
'link_firstrecordurl',
'link_lastgroup',
'link_lastgroupparams',
'link_lastgroupurl',
'link_lastrecord',
'link_lastrecordparams',
'link_lastrecordurl',
'link_nextgroup',
'link_nextgroupparams',
'link_nextgroupurl',
'link_nextrecord',
'link_nextrecordparams',
'link_nextrecordurl',
'link_params',
'link_prevgroup',
'link_prevgroupparams',
'link_prevgroupurl',
'link_prevrecord',
'link_prevrecordparams',
'link_prevrecordurl',
'link_setformat',
'link_url',
'list',
'list_additem',
'list_fromlist',
'list_fromstring',
'list_getitem',
'list_itemcount',
'list_iterator',
'list_removeitem',
'list_replaceitem',
'list_reverseiterator',
'list_tostring',
'literal',
'ljax_end',
'ljax_hastarget',
'ljax_include',
'ljax_start',
'ljax_target',
'local',
'local_defined',
'local_remove',
'local_reset',
'locale_format',
'locals',
'log',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_setdestination',
'log_sql',
'log_warning',
'logicalop_value',
'logicaloperator_value',
'loop',
'loop_abort',
'loop_continue',
'loop_count',
'lt',
'lte',
'magick_image',
'map',
'map_iterator',
'match_comparator',
'match_notrange',
'match_notregexp',
'match_range',
'match_regexp',
'math_abs',
'math_acos',
'math_add',
'math_asin',
'math_atan',
'math_atan2',
'math_ceil',
'math_converteuro',
'math_cos',
'math_div',
'math_exp',
'math_floor',
'math_internal_rand',
'math_internal_randmax',
'math_internal_srand',
'math_ln',
'math_log',
'math_log10',
'math_max',
'math_min',
'math_mod',
'math_mult',
'math_pow',
'math_random',
'math_range',
'math_rint',
'math_roman',
'math_round',
'math_sin',
'math_sqrt',
'math_sub',
'math_tan',
'maxrecords_value',
'memory_session_driver',
'mime_type',
'minimal',
'misc__srand',
'misc_randomnumber',
'misc_roman',
'misc_valid_creditcard',
'mysql_session_driver',
'named_param',
'namespace_current',
'namespace_delimiter',
'namespace_exists',
'namespace_file_fullpathexists',
'namespace_global',
'namespace_import',
'namespace_load',
'namespace_page',
'namespace_unload',
'namespace_using',
'neq',
'net',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'no_default_output',
'none',
'noprocess',
'not',
'nrx',
'nslookup',
'null',
'object',
'once',
'oneoff',
'op_logicalvalue',
'operator_logicalvalue',
'option',
'or',
'os_process',
'output',
'output_none',
'pair',
'params_up',
'pdf_barcode',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_serve',
'pdf_table',
'pdf_text',
'percent',
'portal',
'postcondition',
'precondition',
'prettyprintingnsmap',
'prettyprintingtypemap',
'priorityqueue',
'private',
'proc_convert',
'proc_convertbody',
'proc_convertone',
'proc_extract',
'proc_extractone',
'proc_find',
'proc_first',
'proc_foreach',
'proc_get',
'proc_join',
'proc_lasso',
'proc_last',
'proc_map_entry',
'proc_null',
'proc_regexp',
'proc_xml',
'proc_xslt',
'process',
'protect',
'queue',
'rand',
'randomnumber',
'raw',
'recid_value',
'record_count',
'recordcount',
'recordid_value',
'records',
'records_array',
'records_map',
'redirect_url',
'reference',
'referer',
'referer_url',
'referrer',
'referrer_url',
'regexp',
'repeating',
'repeating_valueitem',
'repeatingvalueitem',
'repetition',
'req_column',
'req_field',
'required_column',
'required_field',
'response_fileexists',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'resultset',
'resultset_count',
'return',
'return_value',
'reverseiterator',
'roman',
'row_count',
'rows',
'rows_array',
'run_children',
'rx',
'schema_name',
'scientific',
'search_args',
'search_arguments',
'search_columnitem',
'search_fielditem',
'search_operatoritem',
'search_opitem',
'search_valueitem',
'searchfielditem',
'searchoperatoritem',
'searchopitem',
'searchvalueitem',
'select',
'selected',
'self',
'serialize',
'series',
'server_date',
'server_day',
'server_ip',
'server_name',
'server_port',
'server_push',
'server_siteisrunning',
'server_sitestart',
'server_sitestop',
'server_time',
'session_abort',
'session_addoutputfilter',
'session_addvar',
'session_addvariable',
'session_deleteexpired',
'session_driver',
'session_end',
'session_id',
'session_removevar',
'session_removevariable',
'session_result',
'session_setdriver',
'session_start',
'set',
'set_iterator',
'set_reverseiterator',
'shown_count',
'shown_first',
'shown_last',
'site_atbegin',
'site_id',
'site_name',
'site_restart',
'skiprecords_value',
'sleep',
'soap_convertpartstopairs',
'soap_definetag',
'soap_info',
'soap_lastrequest',
'soap_lastresponse',
'soap_stub',
'sort_args',
'sort_arguments',
'sort_columnitem',
'sort_fielditem',
'sort_orderitem',
'sortcolumnitem',
'sortfielditem',
'sortorderitem',
'sqlite_createdb',
'sqlite_session_driver',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'srand',
'stack',
'stock_quote',
'string',
'string_charfromname',
'string_concatenate',
'string_countfields',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_fordigit',
'string_getfield',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_lowercase',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_validcharset',
'table_name',
'table_realname',
'tag',
'tag_name',
'tags',
'tags_find',
'tags_list',
'tcp_close',
'tcp_open',
'tcp_send',
'tcp_tcp_close',
'tcp_tcp_open',
'tcp_tcp_send',
'thread_abort',
'thread_atomic',
'thread_event',
'thread_exists',
'thread_getcurrentid',
'thread_getpriority',
'thread_info',
'thread_list',
'thread_lock',
'thread_pipe',
'thread_priority_default',
'thread_priority_high',
'thread_priority_low',
'thread_rwlock',
'thread_semaphore',
'thread_setpriority',
'token_value',
'total_records',
'treemap',
'treemap_iterator',
'true',
'url_rewrite',
'valid_creditcard',
'valid_date',
'valid_email',
'valid_url',
'value_list',
'value_listitem',
'valuelistitem',
'var',
'var_defined',
'var_remove',
'var_reset',
'var_set',
'variable',
'variable_defined',
'variable_set',
'variables',
'variant_count',
'vars',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'while',
'wsdl_extract',
'wsdl_getbinding',
'wsdl_getbindingforoperation',
'wsdl_getbindingoperations',
'wsdl_getmessagenamed',
'wsdl_getmessageparts',
'wsdl_getmessagetriofromporttype',
'wsdl_getopbodystyle',
'wsdl_getopbodyuse',
'wsdl_getoperation',
'wsdl_getoplocation',
'wsdl_getopmessagetypes',
'wsdl_getopsoapaction',
'wsdl_getportaddress',
'wsdl_getportsforservice',
'wsdl_getporttype',
'wsdl_getporttypeoperation',
'wsdl_getservicedocumentation',
'wsdl_getservices',
'wsdl_gettargetnamespace',
'wsdl_issoapoperation',
'wsdl_listoperations',
'wsdl_maketest',
'xml',
'xml_extract',
'xml_rpc',
'xml_rpccall',
'xml_rw',
'xml_serve',
'xml_transform',
'xml_xml',
'xml_xmlstream',
'xmlstream',
'xsd_attribute',
'xsd_blankarraybase',
'xsd_blankbase',
'xsd_buildtype',
'xsd_cache',
'xsd_checkcardinality',
'xsd_continueall',
'xsd_continueannotation',
'xsd_continueany',
'xsd_continueanyattribute',
'xsd_continueattribute',
'xsd_continueattributegroup',
'xsd_continuechoice',
'xsd_continuecomplexcontent',
'xsd_continuecomplextype',
'xsd_continuedocumentation',
'xsd_continueextension',
'xsd_continuegroup',
'xsd_continuekey',
'xsd_continuelist',
'xsd_continuerestriction',
'xsd_continuesequence',
'xsd_continuesimplecontent',
'xsd_continuesimpletype',
'xsd_continueunion',
'xsd_deserialize',
'xsd_fullyqualifyname',
'xsd_generate',
'xsd_generateblankfromtype',
'xsd_generateblanksimpletype',
'xsd_generatetype',
'xsd_getschematype',
'xsd_issimpletype',
'xsd_loadschema',
'xsd_lookupnamespaceuri',
'xsd_lookuptype',
'xsd_processany',
'xsd_processattribute',
'xsd_processattributegroup',
'xsd_processcomplextype',
'xsd_processelement',
'xsd_processgroup',
'xsd_processimport',
'xsd_processinclude',
'xsd_processschema',
'xsd_processsimpletype',
'xsd_ref',
'xsd_type'
]
}
MEMBERS = {
'Member Methods': [
'escape_member',
'oncompare',
'sameas',
'isa',
'ascopy',
'asstring',
'ascopydeep',
'type',
'trait',
'parent',
'settrait',
'oncreate',
'listmethods',
'hasmethod',
'invoke',
'addtrait',
'isnota',
'isallof',
'isanyof',
'size',
'gettype',
'istype',
'doccomment',
'requires',
'provides',
'name',
'subtraits',
'description',
'hash',
'hosttonet16',
'hosttonet32',
'nettohost16',
'nettohost32',
'nettohost64',
'hosttonet64',
'bitset',
'bittest',
'bitflip',
'bitclear',
'bitor',
'bitand',
'bitxor',
'bitnot',
'bitshiftleft',
'bitshiftright',
'bytes',
'abs',
'div',
'dereferencepointer',
'asdecimal',
'serializationelements',
'acceptdeserializedelement',
'serialize',
'deg2rad',
'asstringhex',
'asstringoct',
'acos',
'asin',
'atan',
'atan2',
'ceil',
'cos',
'cosh',
'exp',
'fabs',
'floor',
'frexp',
'ldexp',
'log',
'log10',
'modf',
'pow',
'sin',
'sinh',
'sqrt',
'tan',
'tanh',
'erf',
'erfc',
'gamma',
'hypot',
'j0',
'j1',
'jn',
'lgamma',
'y0',
'y1',
'yn',
'isnan',
'acosh',
'asinh',
'atanh',
'cbrt',
'expm1',
'nextafter',
'scalb',
'ilogb',
'log1p',
'logb',
'remainder',
'rint',
'asinteger',
'self',
'detach',
'restart',
'resume',
'continuation',
'home',
'callsite_file',
'callsite_line',
'callsite_col',
'callstack',
'splitthread',
'threadreaddesc',
'givenblock',
'autocollectbuffer',
'calledname',
'methodname',
'invokeuntil',
'invokewhile',
'invokeautocollect',
'asasync',
'append',
'appendchar',
'private_find',
'private_findlast',
'length',
'chardigitvalue',
'private_compare',
'remove',
'charname',
'chartype',
'decompose',
'normalize',
'digit',
'foldcase',
'sub',
'integer',
'private_merge',
'unescape',
'trim',
'titlecase',
'reverse',
'getisocomment',
'getnumericvalue',
'totitle',
'toupper',
'tolower',
'lowercase',
'uppercase',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isxdigit',
'islower',
'isprint',
'isspace',
'istitle',
'ispunct',
'isgraph',
'isblank',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'encodehtml',
'decodehtml',
'encodexml',
'decodexml',
'encodehtmltoxml',
'getpropertyvalue',
'hasbinaryproperty',
'asbytes',
'find',
'findlast',
'contains',
'get',
'equals',
'compare',
'comparecodepointorder',
'padleading',
'padtrailing',
'merge',
'split',
'removeleading',
'removetrailing',
'beginswith',
'endswith',
'replace',
'values',
'foreachcharacter',
'foreachlinebreak',
'foreachwordbreak',
'eachwordbreak',
'eachcharacter',
'foreachmatch',
'eachmatch',
'encodesql92',
'encodesql',
'keys',
'decomposeassignment',
'firstcomponent',
'ifempty',
'eachsub',
'stripfirstcomponent',
'isnotempty',
'first',
'lastcomponent',
'foreachpathcomponent',
'isfullpath',
'back',
'second',
'componentdelimiter',
'isempty',
'foreachsub',
'front',
'striplastcomponent',
'eachcomponent',
'eachline',
'splitextension',
'hastrailingcomponent',
'last',
'ifnotempty',
'extensiondelimiter',
'eachword',
'substring',
'setsize',
'reserve',
'getrange',
'private_setrange',
'importas',
'import8bits',
'import32bits',
'import64bits',
'import16bits',
'importbytes',
'importpointer',
'export8bits',
'export16bits',
'export32bits',
'export64bits',
'exportbytes',
'exportsigned8bits',
'exportsigned16bits',
'exportsigned32bits',
'exportsigned64bits',
'marker',
'swapbytes',
'encodeurl',
'decodeurl',
'encodebase64',
'decodebase64',
'encodeqp',
'decodeqp',
'encodemd5',
'encodehex',
'decodehex',
'uncompress',
'compress',
'detectcharset',
'bestcharset',
'crc',
'importstring',
'setrange',
'exportas',
'exportstring',
'exportpointerbits',
'foreachbyte',
'eachbyte',
'setposition',
'position',
'value',
'join',
'asstaticarray',
'foreach',
'findposition',
'min',
'groupjoin',
'orderbydescending',
'average',
'take',
'do',
'selectmany',
'skip',
'select',
'sum',
'max',
'asarray',
'thenbydescending',
'aslist',
'orderby',
'thenby',
'where',
'groupby',
'asgenerator',
'typename',
'returntype',
'restname',
'paramdescs',
'action',
'statement',
'inputcolumns',
'keycolumns',
'returncolumns',
'sortcolumns',
'skiprows',
'maxrows',
'rowsfound',
'statementonly',
'lop',
'databasename',
'tablename',
'schemaname',
'hostid',
'hostdatasource',
'hostname',
'hostport',
'hostusername',
'hostpassword',
'hostschema',
'hosttableencoding',
'hostextra',
'hostisdynamic',
'refobj',
'connection',
'prepared',
'getset',
'addset',
'numsets',
'addrow',
'addcolumninfo',
'forcedrowid',
'makeinheritedcopy',
'filename',
'expose',
'recover',
'insert',
'removeall',
'count',
'exchange',
'findindex',
'foreachpair',
'foreachkey',
'sort',
'insertfirst',
'difference',
'removeback',
'insertback',
'removelast',
'removefront',
'insertfrom',
'intersection',
'top',
'insertlast',
'push',
'union',
'removefirst',
'insertfront',
'pop',
'fd',
'family',
'isvalid',
'isssl',
'open',
'close',
'read',
'write',
'ioctl',
'seek',
'mode',
'mtime',
'atime',
'dup',
'dup2',
'fchdir',
'fchown',
'fsync',
'ftruncate',
'fchmod',
'sendfd',
'receivefd',
'readobject',
'tryreadobject',
'writeobject',
'leaveopen',
'rewind',
'tell',
'language',
'script',
'country',
'variant',
'displaylanguage',
'displayscript',
'displaycountry',
'displayvariant',
'displayname',
'basename',
'keywords',
'iso3language',
'iso3country',
'formatas',
'formatnumber',
'parsenumber',
'parseas',
'format',
'parse',
'add',
'roll',
'set',
'getattr',
'setattr',
'clear',
'isset',
'settimezone',
'timezone',
'time',
'indaylighttime',
'createdocument',
'parsedocument',
'hasfeature',
'createdocumenttype',
'nodename',
'nodevalue',
'nodetype',
'parentnode',
'childnodes',
'firstchild',
'lastchild',
'previoussibling',
'nextsibling',
'attributes',
'ownerdocument',
'namespaceuri',
'prefix',
'localname',
'insertbefore',
'replacechild',
'removechild',
'appendchild',
'haschildnodes',
'clonenode',
'issupported',
'hasattributes',
'extract',
'extractone',
'extractfast',
'transform',
'foreachchild',
'eachchild',
'extractfastone',
'data',
'substringdata',
'appenddata',
'insertdata',
'deletedata',
'replacedata',
'doctype',
'implementation',
'documentelement',
'createelement',
'createdocumentfragment',
'createtextnode',
'createcomment',
'createcdatasection',
'createprocessinginstruction',
'createattribute',
'createentityreference',
'getelementsbytagname',
'importnode',
'createelementns',
'createattributens',
'getelementsbytagnamens',
'getelementbyid',
'tagname',
'getattribute',
'setattribute',
'removeattribute',
'getattributenode',
'setattributenode',
'removeattributenode',
'getattributens',
'setattributens',
'removeattributens',
'getattributenodens',
'setattributenodens',
'hasattribute',
'hasattributens',
'setname',
'contents',
'specified',
'ownerelement',
'splittext',
'notationname',
'publicid',
'systemid',
'target',
'entities',
'notations',
'internalsubset',
'item',
'getnameditem',
'getnameditemns',
'setnameditem',
'setnameditemns',
'removenameditem',
'removenameditemns',
'askeyedgenerator',
'eachpair',
'eachkey',
'next',
'readstring',
'readattributevalue',
'attributecount',
'baseuri',
'depth',
'hasvalue',
'isemptyelement',
'xmllang',
'getattributenamespace',
'lookupnamespace',
'movetoattribute',
'movetoattributenamespace',
'movetofirstattribute',
'movetonextattribute',
'movetoelement',
'prepare',
'last_insert_rowid',
'total_changes',
'interrupt',
'errcode',
'errmsg',
'addmathfunctions',
'finalize',
'step',
'bind_blob',
'bind_double',
'bind_int',
'bind_null',
'bind_text',
'bind_parameter_index',
'reset',
'column_count',
'column_name',
'column_decltype',
'column_blob',
'column_double',
'column_int64',
'column_text',
'column_type',
'ismultipart',
'gotfileupload',
'setmaxfilesize',
'getparts',
'trackingid',
'currentfile',
'addtobuffer',
'input',
'replacepattern',
'findpattern',
'ignorecase',
'setinput',
'setreplacepattern',
'setfindpattern',
'setignorecase',
'output',
'appendreplacement',
'matches',
'private_replaceall',
'appendtail',
'groupcount',
'matchposition',
'matchesstart',
'private_replacefirst',
'private_split',
'matchstring',
'replaceall',
'replacefirst',
'findall',
'findcount',
'findfirst',
'findsymbols',
'loadlibrary',
'getlibrary',
'atend',
'f',
'r',
'form',
'gen',
'callfirst',
'key',
'by',
'from',
'init',
'to',
'd',
't',
'object',
'inneroncompare',
'members',
'writeid',
'addmember',
'refid',
'index',
'objects',
'tabs',
'trunk',
'trace',
'asxml',
'tabstr',
'toxmlstring',
'document',
'idmap',
'readidobjects',
'left',
'right',
'up',
'red',
'root',
'getnode',
'firstnode',
'lastnode',
'nextnode',
'private_rebalanceforremove',
'private_rotateleft',
'private_rotateright',
'private_rebalanceforinsert',
'eachnode',
'foreachnode',
'encoding',
'resolvelinks',
'readbytesfully',
'dowithclose',
'readsomebytes',
'readbytes',
'writestring',
'parentdir',
'aslazystring',
'path',
'openread',
'openwrite',
'openwriteonly',
'openappend',
'opentruncate',
'writebytes',
'exists',
'modificationtime',
'lastaccesstime',
'modificationdate',
'lastaccessdate',
'delete',
'moveto',
'copyto',
'linkto',
'flush',
'chmod',
'chown',
'isopen',
'setmarker',
'setmode',
'foreachline',
'lock',
'unlock',
'trylock',
'testlock',
'perms',
'islink',
'isdir',
'realpath',
'openwith',
'asraw',
'rawdiff',
'getformat',
'setformat',
'subtract',
'gmt',
'dst',
'era',
'year',
'month',
'week',
'weekofyear',
'weekofmonth',
'day',
'dayofmonth',
'dayofyear',
'dayofweek',
'dayofweekinmonth',
'ampm',
'am',
'pm',
'hour',
'hourofday',
'hourofampm',
'minute',
'millisecond',
'zoneoffset',
'dstoffset',
'yearwoy',
'dowlocal',
'extendedyear',
'julianday',
'millisecondsinday',
'firstdayofweek',
'fixformat',
'minutesbetween',
'hoursbetween',
'secondsbetween',
'daysbetween',
'businessdaysbetween',
'pdifference',
'getfield',
'create',
'setcwd',
'foreachentry',
'eachpath',
'eachfilepath',
'eachdirpath',
'each',
'eachfile',
'eachdir',
'eachpathrecursive',
'eachfilepathrecursive',
'eachdirpathrecursive',
'eachentry',
'makefullpath',
'annotate',
'blur',
'command',
'composite',
'contrast',
'convert',
'crop',
'execute',
'enhance',
'flipv',
'fliph',
'modulate',
'rotate',
'save',
'scale',
'sharpen',
'addcomment',
'comments',
'describe',
'file',
'height',
'pixel',
'resolutionv',
'resolutionh',
'width',
'setcolorspace',
'colorspace',
'debug',
'histogram',
'imgptr',
'appendimagetolist',
'fx',
'applyheatcolors',
'authenticate',
'search',
'searchurl',
'readerror',
'readline',
'setencoding',
'closewrite',
'exitcode',
'getversion',
'findclass',
'throw',
'thrownew',
'exceptionoccurred',
'exceptiondescribe',
'exceptionclear',
'fatalerror',
'newglobalref',
'deleteglobalref',
'deletelocalref',
'issameobject',
'allocobject',
'newobject',
'getobjectclass',
'isinstanceof',
'getmethodid',
'callobjectmethod',
'callbooleanmethod',
'callbytemethod',
'callcharmethod',
'callshortmethod',
'callintmethod',
'calllongmethod',
'callfloatmethod',
'calldoublemethod',
'callvoidmethod',
'callnonvirtualobjectmethod',
'callnonvirtualbooleanmethod',
'callnonvirtualbytemethod',
'callnonvirtualcharmethod',
'callnonvirtualshortmethod',
'callnonvirtualintmethod',
'callnonvirtuallongmethod',
'callnonvirtualfloatmethod',
'callnonvirtualdoublemethod',
'callnonvirtualvoidmethod',
'getfieldid',
'getobjectfield',
'getbooleanfield',
'getbytefield',
'getcharfield',
'getshortfield',
'getintfield',
'getlongfield',
'getfloatfield',
'getdoublefield',
'setobjectfield',
'setbooleanfield',
'setbytefield',
'setcharfield',
'setshortfield',
'setintfield',
'setlongfield',
'setfloatfield',
'setdoublefield',
'getstaticmethodid',
'callstaticobjectmethod',
'callstaticbooleanmethod',
'callstaticbytemethod',
'callstaticcharmethod',
'callstaticshortmethod',
'callstaticintmethod',
'callstaticlongmethod',
'callstaticfloatmethod',
'callstaticdoublemethod',
'callstaticvoidmethod',
'getstaticfieldid',
'getstaticobjectfield',
'getstaticbooleanfield',
'getstaticbytefield',
'getstaticcharfield',
'getstaticshortfield',
'getstaticintfield',
'getstaticlongfield',
'getstaticfloatfield',
'getstaticdoublefield',
'setstaticobjectfield',
'setstaticbooleanfield',
'setstaticbytefield',
'setstaticcharfield',
'setstaticshortfield',
'setstaticintfield',
'setstaticlongfield',
'setstaticfloatfield',
'setstaticdoublefield',
'newstring',
'getstringlength',
'getstringchars',
'getarraylength',
'newobjectarray',
'getobjectarrayelement',
'setobjectarrayelement',
'newbooleanarray',
'newbytearray',
'newchararray',
'newshortarray',
'newintarray',
'newlongarray',
'newfloatarray',
'newdoublearray',
'getbooleanarrayelements',
'getbytearrayelements',
'getchararrayelements',
'getshortarrayelements',
'getintarrayelements',
'getlongarrayelements',
'getfloatarrayelements',
'getdoublearrayelements',
'getbooleanarrayregion',
'getbytearrayregion',
'getchararrayregion',
'getshortarrayregion',
'getintarrayregion',
'getlongarrayregion',
'getfloatarrayregion',
'getdoublearrayregion',
'setbooleanarrayregion',
'setbytearrayregion',
'setchararrayregion',
'setshortarrayregion',
'setintarrayregion',
'setlongarrayregion',
'setfloatarrayregion',
'setdoublearrayregion',
'monitorenter',
'monitorexit',
'fromreflectedmethod',
'fromreflectedfield',
'toreflectedmethod',
'toreflectedfield',
'exceptioncheck',
'dbtablestable',
'dstable',
'dsdbtable',
'dshoststable',
'fieldstable',
'sql',
'adddatasource',
'loaddatasourceinfo',
'loaddatasourcehostinfo',
'getdatasource',
'getdatasourceid',
'getdatasourcename',
'listdatasources',
'listactivedatasources',
'removedatasource',
'listdatasourcehosts',
'listhosts',
'adddatasourcehost',
'getdatasourcehost',
'removedatasourcehost',
'getdatabasehost',
'gethostdatabase',
'listalldatabases',
'listdatasourcedatabases',
'listhostdatabases',
'getdatasourcedatabase',
'getdatasourcedatabasebyid',
'getdatabasebyname',
'getdatabasebyid',
'getdatabasebyalias',
'adddatasourcedatabase',
'removedatasourcedatabase',
'listalltables',
'listdatabasetables',
'getdatabasetable',
'getdatabasetablebyalias',
'getdatabasetablebyid',
'gettablebyid',
'adddatabasetable',
'removedatabasetable',
'removefield',
'maybevalue',
'getuniquealiasname',
'makecolumnlist',
'makecolumnmap',
'datasourcecolumns',
'datasourcemap',
'hostcolumns',
'hostmap',
'hostcolumns2',
'hostmap2',
'databasecolumns',
'databasemap',
'tablecolumns',
'tablemap',
'databasecolumnnames',
'hostcolumnnames',
'hostcolumnnames2',
'datasourcecolumnnames',
'tablecolumnnames',
'bindcount',
'sqlite3',
'db',
'tables',
'hastable',
'tablehascolumn',
'eachrow',
'bindparam',
'foreachrow',
'executelazy',
'executenow',
'lastinsertid',
'table',
'bindone',
'src',
'stat',
'colmap',
'getcolumn',
'locals',
'getcolumns',
'bodybytes',
'headerbytes',
'ready',
'token',
'url',
'done',
'header',
'result',
'statuscode',
'raw',
'version',
'perform',
'performonce',
's',
'linediffers',
'sourcefile',
'sourceline',
'sourcecolumn',
'continuationpacket',
'continuationpoint',
'continuationstack',
'features',
'lastpoint',
'net',
'running',
'source',
'run',
'pathtouri',
'sendpacket',
'readpacket',
'handlefeatureset',
'handlefeatureget',
'handlestdin',
'handlestdout',
'handlestderr',
'isfirststep',
'handlecontinuation',
'ensurestopped',
'handlestackget',
'handlecontextnames',
'formatcontextelements',
'formatcontextelement',
'bptypetostr',
'bptoxml',
'handlebreakpointlist',
'handlebreakpointget',
'handlebreakpointremove',
'condtoint',
'inttocond',
'handlebreakpointupdate',
'handlebreakpointset',
'handlecontextget',
'handlesource',
'error',
'setstatus',
'getstatus',
'stoprunning',
'pollide',
'polldbg',
'runonce',
'arguments',
'id',
'argumentvalue',
'end',
'start',
'days',
'foreachday',
'padzero',
'actionparams',
'capi',
'doclose',
'dsinfo',
'isnothing',
'named',
'workinginputcolumns',
'workingkeycolumns',
'workingreturncolumns',
'workingsortcolumns',
'workingkeyfield_name',
'scanfordatasource',
'configureds',
'configuredskeys',
'scrubkeywords',
'closeprepared',
'filterinputcolumn',
'prev',
'head',
'removenode',
'listnode',
'bind',
'listen',
'remoteaddress',
'shutdownrdwr',
'shutdownwr',
'shutdownrd',
'localaddress',
'accept',
'connect',
'foreachaccept',
'writeobjecttcp',
'readobjecttcp',
'begintls',
'endtls',
'loadcerts',
'sslerrfail',
'fromname',
'fromport',
'env',
'checked',
'getclass',
'jobjectisa',
'new',
'callvoid',
'callint',
'callfloat',
'callboolean',
'callobject',
'callstring',
'callstaticobject',
'callstaticstring',
'callstaticint',
'callstaticboolean',
'chk',
'makecolor',
'realdoc',
'addbarcode',
'addchapter',
'addcheckbox',
'addcombobox',
'addhiddenfield',
'addimage',
'addlist',
'addpage',
'addparagraph',
'addpasswordfield',
'addphrase',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsubmitbutton',
'addtable',
'addtextarea',
'addtextfield',
'addtext',
'arc',
'circle',
'closepath',
'curveto',
'drawtext',
'getcolor',
'getheader',
'getheaders',
'getmargins',
'getpagenumber',
'getsize',
'insertpage',
'line',
'rect',
'setcolor',
'setfont',
'setlinewidth',
'setpagenumber',
'conventionaltop',
'lowagiefont',
'jcolor',
'jbarcode',
'generatechecksum',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getcode',
'getfont',
'gettextalignment',
'gettextsize',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setcode',
'setgeneratechecksum',
'setshowchecksum',
'settextalignment',
'settextsize',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'jfont',
'getencoding',
'getface',
'getfullfontname',
'getpsfontname',
'getsupportedencodings',
'istruetype',
'getstyle',
'getbold',
'getitalic',
'getunderline',
'setface',
'setunderline',
'setbold',
'setitalic',
'textwidth',
'jimage',
'ontop',
'jlist',
'jread',
'addjavascript',
'exportfdf',
'extractimage',
'fieldnames',
'fieldposition',
'fieldtype',
'fieldvalue',
'gettext',
'importfdf',
'javascript',
'pagecount',
'pagerotation',
'pagesize',
'setfieldvalue',
'setpagerange',
'jtable',
'getabswidth',
'getalignment',
'getbordercolor',
'getborderwidth',
'getcolumncount',
'getpadding',
'getrowcount',
'getspacing',
'setalignment',
'setbordercolor',
'setborderwidth',
'setpadding',
'setspacing',
'jtext',
'element',
'foreachspool',
'unspool',
'err',
'in',
'out',
'pid',
'wait',
'testexitcode',
'maxworkers',
'tasks',
'workers',
'startone',
'addtask',
'waitforcompletion',
'isidle',
'scanworkers',
'scantasks',
'z',
'addfile',
'adddir',
'adddirpath',
'foreachfile',
'foreachfilename',
'eachfilename',
'filenames',
'getfile',
'meta',
'criteria',
'map',
'valid',
'lazyvalue',
'dns_response',
'qdcount',
'qdarray',
'answer',
'bitformat',
'consume_rdata',
'consume_string',
'consume_label',
'consume_domain',
'consume_message',
'errors',
'warnings',
'addwarning',
'adderror',
'renderbytes',
'renderstring',
'components',
'addcomponent',
'addcomponents',
'body',
'renderdocumentbytes',
'contenttype',
'mime_boundary',
'mime_contenttype',
'mime_hdrs',
'addtextpart',
'addhtmlpart',
'addattachment',
'addpart',
'recipients',
'pop_capa',
'pop_debug',
'pop_err',
'pop_get',
'pop_ids',
'pop_index',
'pop_log',
'pop_mode',
'pop_net',
'pop_res',
'pop_server',
'pop_timeout',
'pop_token',
'pop_cmd',
'user',
'pass',
'apop',
'auth',
'quit',
'rset',
'list',
'uidl',
'retr',
'dele',
'noop',
'capa',
'stls',
'authorize',
'retrieve',
'headers',
'uniqueid',
'capabilities',
'cancel',
'results',
'lasterror',
'parse_body',
'parse_boundary',
'parse_charset',
'parse_content_disposition',
'parse_content_transfer_encoding',
'parse_content_type',
'parse_hdrs',
'parse_mode',
'parse_msg',
'parse_parts',
'parse_rawhdrs',
'rawheaders',
'content_type',
'content_transfer_encoding',
'content_disposition',
'boundary',
'charset',
'cc',
'subject',
'bcc',
'date',
'pause',
'continue',
'touch',
'refresh',
'queue',
'status',
'queue_status',
'active_tick',
'getprefs',
'initialize',
'queue_maintenance',
'queue_messages',
'content',
'rectype',
'requestid',
'cachedappprefix',
'cachedroot',
'cookiesary',
'fcgireq',
'fileuploadsary',
'headersmap',
'httpauthorization',
'postparamsary',
'queryparamsary',
'documentroot',
'appprefix',
'httpconnection',
'httpcookie',
'httphost',
'httpuseragent',
'httpcachecontrol',
'httpreferer',
'httpreferrer',
'contentlength',
'pathtranslated',
'remoteaddr',
'remoteport',
'requestmethod',
'requesturi',
'scriptfilename',
'scriptname',
'scripturi',
'scripturl',
'serveraddr',
'serveradmin',
'servername',
'serverport',
'serverprotocol',
'serversignature',
'serversoftware',
'pathinfo',
'gatewayinterface',
'httpaccept',
'httpacceptencoding',
'httpacceptlanguage',
'ishttps',
'cookies',
'cookie',
'rawheader',
'queryparam',
'postparam',
'param',
'queryparams',
'querystring',
'postparams',
'poststring',
'params',
'fileuploads',
'isxhr',
'reqid',
'statusmsg',
'requestparams',
'stdin',
'mimes',
'writeheaderline',
'writeheaderbytes',
'writebodybytes',
'cap',
'n',
'proxying',
'stop',
'printsimplemsg',
'handleevalexpired',
'handlenormalconnection',
'handledevconnection',
'splittoprivatedev',
'getmode',
'curl',
'novaluelists',
'makeurl',
'choosecolumntype',
'getdatabasetablepart',
'getlcapitype',
'buildquery',
'getsortfieldspart',
'endjs',
'title',
'addjs',
'addjstext',
'addendjs',
'addendjstext',
'addcss',
'addfavicon',
'attrs',
'dtdid',
'lang',
'xhtml',
'style',
'gethtmlattr',
'hashtmlattr',
'onmouseover',
'onkeydown',
'dir',
'onclick',
'onkeypress',
'onmouseout',
'onkeyup',
'onmousemove',
'onmouseup',
'ondblclick',
'onmousedown',
'sethtmlattr',
'class',
'gethtmlattrstring',
'tag',
'code',
'msg',
'scripttype',
'defer',
'httpequiv',
'scheme',
'href',
'hreflang',
'linktype',
'rel',
'rev',
'media',
'declare',
'classid',
'codebase',
'objecttype',
'codetype',
'archive',
'standby',
'usemap',
'tabindex',
'styletype',
'method',
'enctype',
'accept_charset',
'onsubmit',
'onreset',
'accesskey',
'inputtype',
'maxlength',
'for',
'selected',
'label',
'multiple',
'buff',
'wroteheaders',
'pullrequest',
'pullrawpost',
'shouldclose',
'pullurlpost',
'pullmimepost',
'pullhttpheader',
'pulloneheaderline',
'parseoneheaderline',
'addoneheaderline',
'safeexport8bits',
'writeheader',
'fail',
'connhandler',
'port',
'connectionhandler',
'acceptconnections',
'gotconnection',
'failnoconnectionhandler',
'splitconnection',
'scriptextensions',
'sendfile',
'probemimetype',
'appname',
'inits',
'installs',
'rootmap',
'install',
'getappsource',
'preflight',
'splituppath',
'handleresource',
'handledefinitionhead',
'handledefinitionbody',
'handledefinitionresource',
'execinstalls',
'execinits',
'payload',
'fullpath',
'resourcename',
'issourcefile',
'resourceinvokable',
'srcpath',
'resources',
'eligiblepath',
'eligiblepaths',
'expiresminutes',
'moddatestr',
'zips',
'addzip',
'getzipfilebytes',
'resourcedata',
'zip',
'zipfile',
'zipname',
'zipfilename',
'rawinvokable',
'route',
'setdestination',
'getprowcount',
'encodepassword',
'checkuser',
'needinitialization',
'adduser',
'getuserid',
'getuser',
'getuserbykey',
'removeuser',
'listusers',
'listusersbygroup',
'countusersbygroup',
'addgroup',
'updategroup',
'getgroupid',
'getgroup',
'removegroup',
'listgroups',
'listgroupsbyuser',
'addusertogroup',
'removeuserfromgroup',
'removeuserfromallgroups',
'md5hex',
'usercolumns',
'groupcolumns',
'expireminutes',
'lasttouched',
'hasexpired',
'idealinmemory',
'maxinmemory',
'nextprune',
'nextprunedelta',
'sessionsdump',
'startup',
'validatesessionstable',
'createtable',
'fetchdata',
'savedata',
'kill',
'expire',
'prune',
'entry',
'host',
'tb',
'setdefaultstorage',
'getdefaultstorage',
'onconvert',
'send',
'nodelist',
'delim',
'subnode',
'subnodes',
'addsubnode',
'removesubnode',
'nodeforpath',
'representnoderesult',
'mime',
'extensions',
'representnode',
'jsonfornode',
'defaultcontentrepresentation',
'supportscontentrepresentation',
'htmlcontent',
'appmessage',
'appstatus',
'atends',
'chunked',
'cookiesarray',
'didinclude',
'errstack',
'headersarray',
'includestack',
'outputencoding',
'sessionsmap',
'htmlizestacktrace',
'includes',
'respond',
'sendresponse',
'sendchunk',
'makecookieyumyum',
'getinclude',
'include',
'includeonce',
'includelibrary',
'includelibraryonce',
'includebytes',
'addatend',
'setcookie',
'addheader',
'replaceheader',
'setheaders',
'rawcontent',
'redirectto',
'htmlizestacktracelink',
'doatbegins',
'handlelassoappcontent',
'handlelassoappresponse',
'domainbody',
'establisherrorstate',
'tryfinderrorfile',
'doatends',
'dosessions',
'makenonrelative',
'pushinclude',
'popinclude',
'findinclude',
'checkdebugging',
'splitdebuggingthread',
'matchtriggers',
'rules',
'shouldabort',
'gettrigger',
'trigger',
'rule',
'foo',
'jsonlabel',
'jsonhtml',
'jsonisleaf',
'acceptpost',
'csscontent',
'jscontent'
],
'Lasso 8 Member Tags': [
'accept',
'add',
'addattachment',
'addattribute',
'addbarcode',
'addchapter',
'addcheckbox',
'addchild',
'addcombobox',
'addcomment',
'addcontent',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addlist',
'addnamespace',
'addnextsibling',
'addpage',
'addparagraph',
'addparenttype',
'addpart',
'addpasswordfield',
'addphrase',
'addprevsibling',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsibling',
'addsubmitbutton',
'addtable',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'alarms',
'annotate',
'answer',
'append',
'appendreplacement',
'appendtail',
'arc',
'asasync',
'astype',
'atbegin',
'atbottom',
'atend',
'atfarleft',
'atfarright',
'attop',
'attributecount',
'attributes',
'authenticate',
'authorize',
'backward',
'baseuri',
'bcc',
'beanproperties',
'beginswith',
'bind',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'boundary',
'bytes',
'call',
'cancel',
'capabilities',
'cc',
'chardigitvalue',
'charname',
'charset',
'chartype',
'children',
'circle',
'close',
'closepath',
'closewrite',
'code',
'colorspace',
'command',
'comments',
'compare',
'comparecodepointorder',
'compile',
'composite',
'connect',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'contents',
'contrast',
'convert',
'crop',
'curveto',
'data',
'date',
'day',
'daylights',
'dayofweek',
'dayofyear',
'decrement',
'delete',
'depth',
'describe',
'description',
'deserialize',
'detach',
'detachreference',
'difference',
'digit',
'document',
'down',
'drawtext',
'dst',
'dump',
'endswith',
'enhance',
'equals',
'errors',
'eval',
'events',
'execute',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportfdf',
'exportstring',
'extract',
'extractone',
'fieldnames',
'fieldtype',
'fieldvalue',
'file',
'find',
'findindex',
'findnamespace',
'findnamespacebyhref',
'findpattern',
'findposition',
'first',
'firstchild',
'fliph',
'flipv',
'flush',
'foldcase',
'foreach',
'format',
'forward',
'freebusies',
'freezetype',
'freezevalue',
'from',
'fulltype',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getattribute',
'getattributenamespace',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbordercolor',
'getborderwidth',
'getcode',
'getcolor',
'getcolumncount',
'getencoding',
'getface',
'getfont',
'getformat',
'getfullfontname',
'getheaders',
'getmargins',
'getmethod',
'getnumericvalue',
'getpadding',
'getpagenumber',
'getparams',
'getproperty',
'getpsfontname',
'getrange',
'getrowcount',
'getsize',
'getspacing',
'getsupportedencodings',
'gettextalignment',
'gettextsize',
'gettype',
'gmt',
'groupcount',
'hasattribute',
'haschildren',
'hasvalue',
'header',
'headers',
'height',
'histogram',
'hosttonet16',
'hosttonet32',
'hour',
'id',
'ignorecase',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importfdf',
'importstring',
'increment',
'input',
'insert',
'insertatcurrent',
'insertfirst',
'insertfrom',
'insertlast',
'insertpage',
'integer',
'intersection',
'invoke',
'isa',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isemptyelement',
'islower',
'isopen',
'isprint',
'isspace',
'istitle',
'istruetype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'iterator',
'javascript',
'join',
'journals',
'key',
'keys',
'last',
'lastchild',
'lasterror',
'left',
'length',
'line',
'listen',
'localaddress',
'localname',
'lock',
'lookupnamespace',
'lowercase',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'merge',
'millisecond',
'minute',
'mode',
'modulate',
'month',
'moveto',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'name',
'namespaces',
'namespaceuri',
'nettohost16',
'nettohost32',
'newchild',
'next',
'nextsibling',
'nodetype',
'open',
'output',
'padleading',
'padtrailing',
'pagecount',
'pagesize',
'paraminfo',
'params',
'parent',
'path',
'pixel',
'position',
'prefix',
'previoussibling',
'properties',
'rawheaders',
'read',
'readattributevalue',
'readerror',
'readfrom',
'readline',
'readlock',
'readstring',
'readunlock',
'recipients',
'rect',
'refcount',
'referrals',
'remoteaddress',
'remove',
'removeall',
'removeattribute',
'removechild',
'removecurrent',
'removefirst',
'removelast',
'removeleading',
'removenamespace',
'removetrailing',
'render',
'replace',
'replaceall',
'replacefirst',
'replacepattern',
'replacewith',
'reserve',
'reset',
'resolutionh',
'resolutionv',
'response',
'results',
'retrieve',
'returntype',
'reverse',
'reverseiterator',
'right',
'rotate',
'run',
'save',
'scale',
'search',
'second',
'send',
'serialize',
'set',
'setalignment',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setblocking',
'setbordercolor',
'setborderwidth',
'setbytes',
'setcode',
'setcolor',
'setcolorspace',
'setdatatype',
'setencoding',
'setface',
'setfieldvalue',
'setfont',
'setformat',
'setgeneratechecksum',
'setheight',
'setlassodata',
'setlinewidth',
'setmarker',
'setmode',
'setname',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setproperty',
'setrange',
'setshowchecksum',
'setsize',
'setspacing',
'settemplate',
'settemplatestr',
'settextalignment',
'settextdata',
'settextsize',
'settype',
'setunderline',
'setwidth',
'setxmldata',
'sharpen',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'signal',
'signalall',
'size',
'smooth',
'sort',
'sortwith',
'split',
'standards',
'steal',
'subject',
'substring',
'subtract',
'swapbytes',
'textwidth',
'time',
'timezones',
'titlecase',
'to',
'todos',
'tolower',
'totitle',
'toupper',
'transform',
'trim',
'type',
'unescape',
'union',
'uniqueid',
'unlock',
'unserialize',
'up',
'uppercase',
'value',
'values',
'valuetype',
'wait',
'waskeyword',
'week',
'width',
'write',
'writelock',
'writeto',
'writeunlock',
'xmllang',
'xmlschematype',
'year'
]
}
| mit |
Hitachi-Data-Systems/org-chart-builder | openpyxl/styles/tests/test_fills.py | 1 | 1386 | # Copyright (c) 2010-2014 openpyxl
import pytest
from openpyxl.styles.colors import BLACK, WHITE, Color
@pytest.fixture
def GradientFill():
    # Imported lazily inside the fixture so that test collection succeeds
    # even if openpyxl.styles.fills cannot be imported at module load time.
    from openpyxl.styles.fills import GradientFill
    return GradientFill
class TestGradientFill:
    """Behavioural tests for openpyxl's GradientFill class."""

    def test_empty_ctor(self, GradientFill):
        # A bare GradientFill defaults to a linear gradient with all
        # geometry attributes zeroed and no colour stops.
        fill = GradientFill()
        assert fill.fill_type == 'linear'
        assert (fill.degree, fill.left, fill.right, fill.top, fill.bottom) == \
            (0, 0, 0, 0, 0)
        assert fill.stop == ()

    def test_ctor(self, GradientFill):
        # Keyword arguments are stored verbatim.
        fill = GradientFill(degree=90, left=1, right=2, top=3, bottom=4)
        assert (fill.degree, fill.left, fill.right, fill.top, fill.bottom) == \
            (90, 1, 2, 3, 4)

    def test_sequence(self, GradientFill):
        # A list of Color stops is normalised to a tuple.
        stops = [Color(BLACK), Color(WHITE)]
        fill = GradientFill(stop=stops)
        assert fill.stop == tuple(stops)

    def test_invalid_sequence(self, GradientFill):
        # Raw colour strings (not Color instances) must be rejected.
        bad_stops = [BLACK, WHITE]
        with pytest.raises(TypeError):
            GradientFill(stop=bad_stops)

    def test_dict_interface(self, GradientFill):
        # Iterating the fill yields its attributes as string key/value pairs.
        fill = GradientFill(degree=90, left=1, right=2, top=3, bottom=4)
        expected = {'bottom': "4", 'degree': "90", 'left': "1",
                    'right': "2", 'top': "3", 'type': 'linear'}
        assert dict(fill) == expected
| apache-2.0 |
vkris/pyzookeeper | examples/leader_check.py | 1 | 1166 | import sys, os, time
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
from pyzookeeper import election
from pyzookeeper import log
if __name__=="__main__":
# Leader will execute the parameter passed below
if ( len( sys.argv) < 3):
print 'Usage:\n python '+ sys.argv[0]+' zookeeperIP:Port zNodeName failure_script'
print 'zookeeperIP:Port - Point to a zookeeper instance'
print 'zNodeName - A name to store in zookeeper'
sys.exit(0)
ipAndPort = sys.argv[1]
zNodeName = '/'+sys.argv[2]
failure_script = sys.argv[3]
e = election(ipAndPort,zNodeName)
# The below variable is to make sure the leader/candidate
# script is executed only once every change of status.
failed = False
while(True):
if ( not e.leaderExists(zNodeName) ):
log.info("Leader not found, so executing failure script")
os.system(failure_script)
failed=True
time.sleep(900)
else:
if(failed):
log.info('Leader is now alive, back to normal')
failed=False
time.sleep(10)
| mit |
chrber/dcache-docker | dcache/deps/.vim/bundle/jedi-vim/jedi_vim.py | 5 | 23606 | """
The Python parts of the Jedi library for VIM. It is mostly about communicating
with VIM.
"""
import traceback # for exception output
import re
import os
import sys
from shlex import split as shsplit
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2
is_py3 = sys.version_info[0] >= 3
if is_py3:
    # Provide a `unicode` alias on Python 3 so the rest of the module can
    # handle both major versions uniformly.
    unicode = str
class PythonToVimStr(unicode):
    """Unicode subclass whose repr() is a Vim-compatible quoted string.

    Vim has a different string implementation of single quotes, so values
    passed back to Vim are always rendered double-quoted with backslash
    escapes.
    """
    __slots__ = []
    def __new__(cls, obj, encoding='UTF-8'):
        # On Python 3 (or when already unicode) no decoding is needed;
        # Python 2 byte strings are decoded using `encoding`.
        if is_py3 or isinstance(obj, unicode):
            return unicode.__new__(cls, obj)
        else:
            return unicode.__new__(cls, obj, encoding)
    def __repr__(self):
        # this is totally stupid and makes no sense but vim/python unicode
        # support is pretty bad. don't ask how I came up with this... It just
        # works...
        # It seems to be related to that bug: http://bugs.python.org/issue5876
        if unicode is str:
            s = self
        else:
            s = self.encode('UTF-8')
        return '"%s"' % s.replace('\\', '\\\\').replace('"', r'\"')
class VimError(Exception):
    """Raised when a command or expression evaluated inside Vim fails.

    Attributes:
        message: the error message reported by Vim.
        throwpoint: Vim's description of where the error was thrown.
        executing: the command/expression string that was being executed.
    """
    def __init__(self, message, throwpoint, executing):
        # Use the explicit class here: ``super(type(self), self)`` would
        # recurse infinitely if this exception were ever subclassed.
        super(VimError, self).__init__(message)
        self.message = message
        self.throwpoint = throwpoint
        self.executing = executing

    def __str__(self):
        return self.message + '; created by: ' + repr(self.executing)
def _catch_exception(string, is_eval):
    """Run `string` through the jedi#_vim_exceptions VimL wrapper.

    `vim.error` does not carry the exact error message, so the call is
    routed through a wrapper that captures any Vim exception and hands it
    back as a dictionary; a captured exception is re-raised as VimError.
    """
    wrapped = 'jedi#_vim_exceptions(%s, %s)' % (
        repr(PythonToVimStr(string, 'UTF-8')), is_eval)
    result = vim.eval(wrapped)
    if 'exception' not in result:
        return result['result']
    raise VimError(result['exception'], result['throwpoint'], string)
def vim_command(string):
    """Execute an ex command in Vim, raising VimError on failure."""
    _catch_exception(string, is_eval=0)
def vim_eval(string):
    """Evaluate a Vim expression and return its value, raising VimError on failure."""
    return _catch_exception(string, is_eval=1)
def no_jedi_warning(error=None):
    """Echo a highlighted warning telling the user to install Jedi.

    :param error: optional import-error text appended to the message.
    """
    msg = "Please install Jedi if you want to use jedi-vim."
    if error:
        msg = '{} The error was: {}'.format(msg, error)
    # Bug fix: `msg` (including the import error detail) was previously
    # built and then discarded -- a hard-coded message was echoed instead.
    vim.command('echohl WarningMsg'
                '| echom "{}"'
                '| echohl None'.format(msg.replace('"', '\\"')))
def echo_highlight(msg):
    """Echo `msg` in Vim using the WarningMsg highlight group."""
    escaped = msg.replace('"', '\\"')
    vim_command('echohl WarningMsg | echom "{}" | echohl None'.format(escaped))
import vim
# Import jedi defensively: after this block `jedi` is either a usable,
# sufficiently recent module or None (the plugin functions then bail out
# early via _check_jedi_availability).
try:
    import jedi
except ImportError as e:
    no_jedi_warning(str(e))
    jedi = None
else:
    try:
        version = jedi.__version__
    except Exception as e:  # e.g. AttributeError
        echo_highlight("Could not load jedi python module: {}".format(e))
        jedi = None
    else:
        if isinstance(version, str):
            # the normal use case, now.
            from jedi import utils
            version = utils.version_info()
        if version < (0, 7):
            echo_highlight('Please update your Jedi version, it is too old.')
def catch_and_print_exceptions(func):
    """Decorator that logs exceptions (including vim.error) to stdout
    instead of letting them propagate; the wrapper returns None on failure.
    """
    def wrapped(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except (Exception, vim.error):
            print(traceback.format_exc())
            return None
        return result
    return wrapped
def _check_jedi_availability(show_error=False):
    """Decorator factory: skip the wrapped call when the jedi module could
    not be imported, optionally echoing the install warning first.
    """
    def func_receiver(func):
        def wrapper(*args, **kwargs):
            if jedi is not None:
                return func(*args, **kwargs)
            if show_error:
                no_jedi_warning()
            return None
        return wrapper
    return func_receiver
@catch_and_print_exceptions
def get_script(source=None, column=None):
    """Build a jedi.Script for the current buffer and cursor position.

    :param source: buffer text; defaults to the current Vim buffer content.
    :param column: 0-based column; defaults to the current cursor column.
    """
    # Let jedi also inspect every other open .py buffer for dynamic params.
    jedi.settings.additional_dynamic_modules = \
        [b.name for b in vim.buffers if b.name is not None and b.name.endswith('.py')]
    if source is None:
        source = '\n'.join(vim.current.buffer)
    row = vim.current.window.cursor[0]
    if column is None:
        column = vim.current.window.cursor[1]
    buf_path = vim.current.buffer.name
    encoding = vim_eval('&encoding') or 'latin1'
    return jedi.Script(source, row, column, buf_path, encoding)
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def completions():
    """Omni-completion entry point (called from the jedi#completions VimL
    function).

    Implements Vim's two-phase complete-function protocol: when a:findstart
    is 1, return the column where completion starts; otherwise return the
    list of completion dictionaries matching a:base.
    """
    row, column = vim.current.window.cursor
    # Clear call signatures in the buffer so they aren't seen by the completer.
    # Call signatures in the command line can stay.
    if vim_eval("g:jedi#show_call_signatures") == '1':
        clear_call_signatures()
    if vim.eval('a:findstart') == '1':
        # Phase 1: walk left from the cursor over identifier characters.
        count = 0
        for char in reversed(vim.current.line[:column]):
            if not re.match('[\w\d]', char):
                break
            count += 1
        vim.command('return %i' % (column - count))
    else:
        # Phase 2: rebuild the buffer text with a:base spliced in at the
        # cursor, then ask jedi for completions of that source.
        base = vim.eval('a:base')
        source = ''
        for i, line in enumerate(vim.current.buffer):
            # enter this path again, otherwise source would be incomplete
            if i == row - 1:
                source += line[:column] + base + line[column:]
            else:
                source += line
            source += '\n'
        # here again hacks, because jedi has a different interface than vim
        column += len(base)
        try:
            script = get_script(source=source, column=column)
            completions = script.completions()
            signatures = script.call_signatures()
            out = []
            for c in completions:
                d = dict(word=PythonToVimStr(c.name[:len(base)] + c.complete),
                         abbr=PythonToVimStr(c.name),
                         # stuff directly behind the completion
                         menu=PythonToVimStr(c.description),
                         info=PythonToVimStr(c.docstring()),  # docstr
                         icase=1,  # case insensitive
                         dup=1  # allow duplicates (maybe later remove this)
                         )
                out.append(d)
            strout = str(out)
        except Exception:
            # print to stdout, will be in :messages
            print(traceback.format_exc())
            strout = ''
            completions = []
            signatures = []
        show_call_signatures(signatures)
        vim.command('return ' + strout)
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def goto(mode="goto", no_output=False):
    """
    Jump to (or list) definitions/assignments/usages of the name under the
    cursor.

    :param str mode: "goto", "related_name", "definition", "assignment"
    :param bool no_output: when True, just return the definitions without
        touching windows, the quickfix list or the jumplist.
    :return: list of definitions/assignments
    :rtype: list
    """
    script = get_script()
    try:
        if mode == "goto":
            # Prefer real definitions outside builtins; fall back to
            # assignments when none are found.
            definitions = [x for x in script.goto_definitions()
                           if not x.in_builtin_module()]
            if not definitions:
                definitions = script.goto_assignments()
        elif mode == "related_name":
            definitions = script.usages()
        elif mode == "definition":
            definitions = script.goto_definitions()
        elif mode == "assignment":
            definitions = script.goto_assignments()
    except jedi.NotFoundError:
        echo_highlight("Cannot follow nothing. Put your cursor on a valid name.")
        definitions = []
    else:
        if no_output:
            return definitions
        if not definitions:
            echo_highlight("Couldn't find any definitions for this.")
        elif len(definitions) == 1 and mode != "related_name":
            # just add some mark to add the current position to the jumplist.
            # this is ugly, because it overrides the mark for '`', so if anyone
            # has a better idea, let me know.
            vim_command('normal! m`')
            d = list(definitions)[0]
            if d.in_builtin_module():
                if d.is_keyword:
                    echo_highlight("Cannot get the definition of Python keywords.")
                else:
                    echo_highlight("Builtin modules cannot be displayed (%s)."
                                   % d.desc_with_module)
            else:
                if d.module_path != vim.current.buffer.name:
                    result = new_buffer(d.module_path)
                    if not result:
                        return []
                vim.current.window.cursor = d.line, d.column
        else:
            # multiple solutions: fill the quickfix list instead of jumping.
            lst = []
            for d in definitions:
                if d.in_builtin_module():
                    lst.append(dict(text=PythonToVimStr('Builtin ' + d.description)))
                else:
                    lst.append(dict(filename=PythonToVimStr(d.module_path),
                                    lnum=d.line, col=d.column + 1,
                                    text=PythonToVimStr(d.description)))
            vim_eval('setqflist(%s)' % repr(lst))
            vim_eval('jedi#add_goto_window(' + str(len(lst)) + ')')
    return definitions
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def show_documentation():
    """Collect docstrings for the definitions under the cursor and hand
    them back to VimL via the l:doc / l:doc_lines variables.
    """
    script = get_script()
    try:
        definitions = script.goto_definitions()
    except jedi.NotFoundError:
        definitions = []
    except Exception:
        # print to stdout, will be in :messages
        definitions = []
        print("Exception, this shouldn't happen.")
        print(traceback.format_exc())
    if not definitions:
        echo_highlight('No documentation found for that.')
        vim.command('return')
    else:
        # One section per definition, separated by a 79-dash rule.
        docs = ['Docstring for %s\n%s\n%s' % (d.desc_with_module, '=' * 40, d.docstring())
                if d.docstring() else '|No Docstring for %s|' % d for d in definitions]
        text = ('\n' + '-' * 79 + '\n').join(docs)
        vim.command('let l:doc = %s' % repr(PythonToVimStr(text)))
        vim.command('let l:doc_lines = %s' % len(text.split('\n')))
    return True
@catch_and_print_exceptions
def clear_call_signatures():
    """Remove any call-signature text previously injected into the buffer
    by show_call_signatures (or clear the command line in mode 2).
    """
    # Check if using command line call signatures
    if vim_eval("g:jedi#show_call_signatures") == '2':
        vim_command('echo ""')
        return
    cursor = vim.current.window.cursor
    e = vim_eval('g:jedi#call_signature_escape')
    # We need two turns here to search and replace certain lines:
    # 1. Search for a line with a call signature and save the appended
    #    characters
    # 2. Actually replace the line and redo the status quo.
    py_regex = r'%sjedi=([0-9]+), (.*?)%s.*?%sjedi%s'.replace('%s', e)
    for i, line in enumerate(vim.current.buffer):
        match = re.search(py_regex, line)
        if match is not None:
            # Some signs were added to minimize syntax changes due to call
            # signatures. We have to remove them again. The number of them is
            # specified in `match.group(1)`.
            after = line[match.end() + int(match.group(1)):]
            line = line[:match.start()] + match.group(2) + after
            vim.current.buffer[i] = line
    vim.current.window.cursor = cursor
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def show_call_signatures(signatures=()):
    """Display call signatures either inline in the buffer (conceal hack)
    or on the command line, depending on g:jedi#show_call_signatures.

    :param signatures: pre-computed signatures; fetched from jedi when the
        default empty tuple is passed.
    """
    if vim_eval("has('conceal') && g:jedi#show_call_signatures") == '0':
        return
    if signatures == ():
        signatures = get_script().call_signatures()
    clear_call_signatures()
    if not signatures:
        return
    if vim_eval("g:jedi#show_call_signatures") == '2':
        return cmdline_call_signatures(signatures)
    for i, signature in enumerate(signatures):
        line, column = signature.bracket_start
        # signatures are listed above each other
        line_to_replace = line - i - 1
        # because there's a space before the bracket
        insert_column = column - 1
        if insert_column < 0 or line_to_replace <= 0:
            # Edge cases, when the call signature has no space on the screen.
            break
        # TODO check if completion menu is above or below
        line = vim_eval("getline(%s)" % line_to_replace)
        params = [p.description.replace('\n', '') for p in signature.params]
        try:
            # *_*PLACEHOLDER*_* makes something fat. See after/syntax file.
            params[signature.index] = '*_*%s*_*' % params[signature.index]
        except (IndexError, TypeError):
            pass
        # This stuff is reaaaaally a hack! I cannot stress enough, that
        # this is a stupid solution. But there is really no other yet.
        # There is no possibility in VIM to draw on the screen, but there
        # will be one (see :help todo Patch to access screen under Python.
        # (Marko Mahni, 2010 Jul 18))
        text = " (%s) " % ', '.join(params)
        text = ' ' * (insert_column - len(line)) + text
        end_column = insert_column + len(text) - 2  # -2 due to bold symbols
        # Need to decode it with utf8, because vim returns always a python 2
        # string even if it is unicode.
        e = vim_eval('g:jedi#call_signature_escape')
        if hasattr(e, 'decode'):
            e = e.decode('UTF-8')
        # replace line before with cursor
        regex = "xjedi=%sx%sxjedix".replace('x', e)
        prefix, replace = line[:insert_column], line[insert_column:end_column]
        # Check the replace stuff for strings, to append them
        # (don't want to break the syntax)
        regex_quotes = r'''\\*["']+'''
        # `add` are all the quotation marks.
        # join them with a space to avoid producing '''
        add = ' '.join(re.findall(regex_quotes, replace))
        # search backwards
        if add and replace[0] in ['"', "'"]:
            a = re.search(regex_quotes + '$', prefix)
            add = ('' if a is None else a.group(0)) + add
        tup = '%s, %s' % (len(add), replace)
        repl = prefix + (regex % (tup, text)) + add + line[end_column:]
        vim_eval('setline(%s, %s)' % (line_to_replace, repr(PythonToVimStr(repl))))
@catch_and_print_exceptions
def cmdline_call_signatures(signatures):
    """Echo the call signature(s) on Vim's command line, highlighting the
    currently active parameter when it can be determined.
    """
    def get_params(s):
        # One cleaned-up description string per parameter.
        return [p.description.replace('\n', '') for p in s.params]
    if len(signatures) > 1:
        # Multiple overloads: show parameters column-wise, padding missing
        # positions with '_'.
        params = zip_longest(*map(get_params, signatures), fillvalue='_')
        params = ['(' + ', '.join(p) + ')' for p in params]
    else:
        params = get_params(signatures[0])
    text = ', '.join(params).replace('"', '\\"').replace(r'\n', r'\\n')
    # Allow 12 characters for ruler/showcmd - setting noruler/noshowcmd
    # here causes incorrect undo history
    max_msg_len = int(vim_eval('&columns')) - 12
    max_num_spaces = (max_msg_len - len(signatures[0].call_name)
                      - len(text) - 2)  # 2 accounts for parentheses
    if max_num_spaces < 0:
        return  # No room for the message
    _, column = signatures[0].bracket_start
    num_spaces = min(int(vim_eval('g:jedi#first_col +'
                                  'wincol() - col(".")')) +
                     column - len(signatures[0].call_name),
                     max_num_spaces)
    spaces = ' ' * num_spaces
    try:
        # Highlight the active parameter if any signature knows its index.
        index = [s.index for s in signatures if isinstance(s.index, int)][0]
        escaped_param = params[index].replace(r'\n', r'\\n')
        left = text.index(escaped_param)
        right = left + len(escaped_param)
        vim_command(' echon "%s" | '
                    'echohl Function | echon "%s" | '
                    'echohl None | echon "(" | '
                    'echohl jediFunction | echon "%s" | '
                    'echohl jediFat | echon "%s" | '
                    'echohl jediFunction | echon "%s" | '
                    'echohl None | echon ")"'
                    % (spaces, signatures[0].call_name, text[:left],
                       text[left:right], text[right:]))
    except (TypeError, IndexError):
        vim_command(' echon "%s" | '
                    'echohl Function | echon "%s" | '
                    'echohl None | echon "(" | '
                    'echohl jediFunction | echon "%s" | '
                    'echohl None | echon ")"'
                    % (spaces, signatures[0].call_name, text))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def rename():
    """Interactive rename in two phases.

    First call (a:0 == 0): remember the word under the cursor, delete it
    and drop into insert mode; an InsertLeave autocmd re-invokes this
    function. Second call: read the typed replacement, undo the buffer
    edits, and delegate the project-wide rename to do_rename().
    """
    if not int(vim.eval('a:0')):
        vim_command('augroup jedi_rename')
        vim_command('autocmd InsertLeave <buffer> call jedi#rename(1)')
        vim_command('augroup END')
        vim_command("let s:jedi_replace_orig = expand('<cword>')")
        vim_command('normal! diw')
        vim_command("let s:jedi_changedtick = b:changedtick")
        vim_command('startinsert')
    else:
        # Remove autocommand.
        vim_command('autocmd! jedi_rename InsertLeave')
        # Get replacement, if there is something on the cursor.
        # This won't be the case when the user ends insert mode right away,
        # and `<cword>` would pick up the nearest word instead.
        if vim_eval('getline(".")[getpos(".")[2]-1]') != ' ':
            replace = vim_eval("expand('<cword>')")
        else:
            replace = None
        cursor = vim.current.window.cursor
        # Undo new word, but only if something was changed, which is not the
        # case when ending insert mode right away.
        if vim_eval('b:changedtick != s:jedi_changedtick') == '1':
            vim_command('normal! u')  # Undo new word.
        vim_command('normal! u')  # Undo diw.
        vim.current.window.cursor = cursor
        if replace:
            return do_rename(replace)
def rename_visual():
    """Rename the visually selected text after prompting for a new name."""
    new_name = vim.eval('input("Rename to: ")')
    selection = vim.eval('getline(".")[(getpos("\'<")[2]-1):getpos("\'>")[2]]')
    do_rename(new_name, selection)
def do_rename(replace, orig=None):
    """Replace every usage of `orig` with `replace` across all buffers that
    contain one.

    :param replace: the new name; an empty name aborts the rename.
    :param orig: the old name; defaults to the word saved by rename().
    """
    if not len(replace):
        echo_highlight('No rename possible without name.')
        return
    if orig is None:
        orig = vim_eval('s:jedi_replace_orig')
    # Save original window / tab.
    saved_tab = int(vim_eval('tabpagenr()'))
    saved_win = int(vim_eval('winnr()'))
    temp_rename = goto(mode="related_name", no_output=True)
    # Sort the whole thing reverse (positions at the end of the line
    # must be first, because they move the stuff before the position).
    temp_rename = sorted(temp_rename, reverse=True,
                         key=lambda x: (x.module_path, x.start_pos))
    buffers = set()
    for r in temp_rename:
        if r.in_builtin_module():
            continue
        if os.path.abspath(vim.current.buffer.name) != r.module_path:
            result = new_buffer(r.module_path)
            if not result:
                echo_highlight("Jedi-vim: failed to create buffer window for {}!".format(r.module_path))
                continue
        buffers.add(vim.current.buffer.name)
        # Save view.
        saved_view = vim_eval('string(winsaveview())')
        # Replace original word.
        vim.current.window.cursor = r.start_pos
        vim_command('normal! c{:d}l{}'.format(len(orig), replace))
        # Restore view.
        vim_command('call winrestview(%s)' % saved_view)
    # Restore previous tab and window.
    vim_command('tabnext {:d}'.format(saved_tab))
    vim_command('{:d}wincmd w'.format(saved_win))
    if len(buffers) > 1:
        echo_highlight('Jedi did {:d} renames in {:d} buffers!'.format(
            len(temp_rename), len(buffers)))
    else:
        echo_highlight('Jedi did {:d} renames!'.format(len(temp_rename)))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def py_import():
    """Resolve the module named on the :Pyimport command line and open its
    source file in a new buffer.
    """
    # args are the same as for the :edit command
    args = shsplit(vim.eval('a:args'))
    import_path = args.pop()
    text = 'import %s' % import_path
    scr = jedi.Script(text, 1, len(text), '')
    try:
        completion = scr.goto_assignments()[0]
    except IndexError:
        echo_highlight('Cannot find %s in sys.path!' % import_path)
    else:
        if completion.in_builtin_module():
            echo_highlight('%s is a builtin module.' % import_path)
        else:
            cmd_args = ' '.join([a.replace(' ', '\\ ') for a in args])
            new_buffer(completion.module_path, cmd_args)
@catch_and_print_exceptions
def py_import_completions():
    """Return newline-separated module-name completions for :Pyimport."""
    argl = vim.eval('a:argl')
    try:
        import jedi
    except ImportError:
        print('Pyimport completion requires jedi module: https://github.com/davidhalter/jedi')
        comps = []
    else:
        text = 'import %s' % argl
        script = jedi.Script(text, 1, len(text), '')
        comps = ['%s%s' % (argl, c.complete) for c in script.completions()]
    vim.command("return '%s'" % '\n'.join(comps))
@catch_and_print_exceptions
def new_buffer(path, options=''):
    """Open `path` in a tab, split or the current window, honouring the
    g:jedi#use_tabs_not_buffers / g:jedi#use_splits_not_buffers settings.

    :param options: extra arguments forwarded to the :edit / :tabnew command.
    :return: True on success, False when no buffer could be opened.
    """
    # options are what you can to edit the edit options
    if vim_eval('g:jedi#use_tabs_not_buffers') == '1':
        _tabnew(path, options)
    elif not vim_eval('g:jedi#use_splits_not_buffers') == '1':
        user_split_option = vim_eval('g:jedi#use_splits_not_buffers')
        split_options = {
            'top': 'topleft split',
            'left': 'topleft vsplit',
            'right': 'botright vsplit',
            'bottom': 'botright split',
            'winwidth': 'vs'
        }
        # 'winwidth' picks vertical vs horizontal split by window width.
        if user_split_option == 'winwidth' and vim.current.window.width <= 2 * int(vim_eval("&textwidth ? &textwidth : 80")):
            split_options['winwidth'] = 'sp'
        if user_split_option not in split_options:
            print('g:jedi#use_splits_not_buffers value is not correct, valid options are: %s' % ','.join(split_options.keys()))
        else:
            vim_command(split_options[user_split_option] + " %s" % path)
    else:
        if vim_eval("!&hidden && &modified") == '1':
            if vim_eval("bufname('%')") is None:
                echo_highlight('Cannot open a new buffer, use `:set hidden` or save your buffer')
                return False
            else:
                vim_command('w')
        vim_command('edit %s %s' % (options, escape_file_path(path)))
    # sometimes syntax is being disabled and the filetype not set.
    if vim_eval('!exists("g:syntax_on")') == '1':
        vim_command('syntax enable')
    if vim_eval("&filetype != 'python'") == '1':
        vim_command('set filetype=python')
    return True
@catch_and_print_exceptions
def _tabnew(path, options=''):
    """
    Open a file in a new tab or switch to an existing one.

    :param path: file to open; made absolute before being compared against
        the paths of buffers already shown in some tab.
    :param options: `:tabnew` options, read vim help.
    """
    path = os.path.abspath(path)
    if vim_eval('has("gui")') == '1':
        # GUI Vim supports :tab drop, which does the find-or-open itself.
        vim_command('tab drop %s %s' % (options, escape_file_path(path)))
        return
    for tab_nr in range(int(vim_eval("tabpagenr('$')"))):
        for buf_nr in vim_eval("tabpagebuflist(%i + 1)" % tab_nr):
            buf_nr = int(buf_nr) - 1
            try:
                buf_path = vim.buffers[buf_nr].name
            except (LookupError, ValueError):
                # Just do good old asking for forgiveness.
                # don't know why this happens :-)
                pass
            else:
                if buf_path == path:
                    # tab exists, just switch to that tab
                    vim_command('tabfirst | tabnext %i' % (tab_nr + 1))
                    # Goto the buffer's window.
                    vim_command('exec bufwinnr(%i) . " wincmd w"' % (buf_nr + 1))
                    break
        else:
            continue
        break
    else:
        # tab doesn't exist, add a new one.
        vim_command('tabnew %s' % escape_file_path(path))
def escape_file_path(path):
    """Backslash-escape spaces so `path` is safe inside an ex command."""
    return r'\ '.join(path.split(' '))
def print_to_stdout(level, str_out):
    """Debug callback for jedi: the level is ignored and the message is
    simply echoed to stdout (which Vim collects in :messages)."""
    print(str_out)
| gpl-3.0 |
nioinnovation/nio-cli | tests/test_buildrelease.py | 2 | 1172 | import unittest
from unittest import skipIf
# Probe for the optional niocore-backed command; any import-time failure
# just disables the dependent tests. `except Exception` (instead of a bare
# except) avoids swallowing SystemExit/KeyboardInterrupt.
try:
    from nio_cli.commands.buildrelease import BuildRelease
    niocore_installed = True
except Exception:
    niocore_installed = False
class TestCLI(unittest.TestCase):

    @skipIf(not niocore_installed, 'niocore required for buildrelease')
    def test_buildrelease_git_remote_url_parse(self):
        """Support multiple repo url formats from `git remote -v`"""
        command = BuildRelease({
            '<repo-name>': '', '--ip': '', '--port': '',
        })
        parse = command.parse_url_from_git_remote_command
        # (raw `git remote -v` line, expected normalised git:// url)
        cases = [
            (b"origin git@github.com:nio-blocks/repo.git (fetch)",
             "git://github.com/nio-blocks/repo.git"),
            (b"origin git@github.com:/nio-blocks/repo (fetch)",
             "git://github.com/nio-blocks/repo.git"),
            (b"origin https://github.com/nio-blocks/repo (fetch)",
             "git://github.com/nio-blocks/repo.git"),
            (b"origin https://1.2.3.4/nio-blocks/repo (fetch)",
             "git://1.2.3.4/nio-blocks/repo.git"),
        ]
        for raw_line, expected in cases:
            self.assertEqual(parse(raw_line), expected)
| apache-2.0 |
ludovicoloreti/LexicApp | node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  _new_sha1 = hashlib.sha1
except ImportError:
  import sha
  _new_sha1 = sha.new


# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')

# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')

# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')


# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
  """Split input_path into a (source_tree, output_path) tuple.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """
  match = _path_leading_variable.match(input_path)
  if not match:
    # No leading $(VAR): the whole input is the path.
    return (None, input_path)
  # group(3) is the piece after the variable; None for a bare '$(VAR)'.
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Rewrite Xcode-style "$(NAME)" variable references as shell-style
  "${NAME}" references, leaving all other text untouched."""

  def _braceify(match):
    # Re-emit the captured variable name inside ${...}.
    return '${' + match.group(1) + '}'

  return re.sub(r'\$\((.*?)\)', _braceify, input_string)
class XCObject(object):
  """The abstract base of all class types used in Xcode project files.

  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
             _schema are string property keys as used in project files.  Values
             are a list of four or five elements:
             [ is_list, property_type, is_strong, is_required, default ]
             is_list: True if the property described is a list, as opposed
                      to a single element.
             property_type: The type to use as the value of the property,
                            or if is_list is True, the type to use for each
                            element of the value's list.  property_type must
                            be an XCObject subclass, or one of the built-in
                            types str, int, or dict.
             is_strong: If property_type is an XCObject subclass, is_strong
                        is True to assert that this class "owns," or serves
                        as parent, to the property value (or, if is_list is
                        True, values).  is_strong must be False if
                        property_type is not an XCObject subclass.
             is_required: True if the property is required for the class.
                          Note that is_required being True does not preclude
                          an empty string ("", in the case of property_type
                          str) or list ([], in the case of is_list True) from
                          being set for the property.
             default: Optional.  If is_required is True, default may be set
                      to provide a default value for objects that do not supply
                      their own value.  If is_required is True and default
                      is not provided, users of the class must supply their own
                      value for the property.
             Note that although the values of the array are expressed in
             boolean terms, subclasses provide values as integers to conserve
             horizontal space.
    _should_print_single_line: False in XCObject.  Subclasses whose objects
                               should be written to the project file in the
                               alternate single-line format, such as
                               PBXFileReference and PBXBuildFile, should
                               set this to True.
    _encode_transforms: Used by _EncodeString to encode unprintable characters.
                        The index into this list is the ordinal of the
                        character to transform; each value is a string
                        used to represent the character in the output.  XCObject
                        provides an _encode_transforms list suitable for most
                        XCObject subclasses.
    _alternate_encode_transforms: Provided for subclasses that wish to use
                                  the alternate encoding rules.  Xcode seems
                                  to use these rules when printing objects in
                                  single-line format.  Subclasses that desire
                                  this behavior should set _encode_transforms
                                  to _alternate_encode_transforms.
    _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
                to construct this object's ID.  Most classes that need custom
                hashing behavior should do it by overriding Hashables,
                but in some cases an object's parent may wish to push a
                hashable value into its child, and it can do so by appending
                to _hashables.

  Attributes:
    id: The object's identifier, a 24-character uppercase hexadecimal string.
        Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, UpdateIDs() should
        be called on the root object to assign deterministic values for id to
        each object in the tree.
    parent: The object's parent.  This is set by a parent XCObject when a child
            object is added to it.
    _properties: The object's property dictionary.  An object's properties are
                 described by its class' _schema variable.
  """

  _schema = {}
  _should_print_single_line = False

  # See _EncodeString.  Start with every ASCII control character mapped to
  # its "\Unnnn" escape, then overwrite the ones that have dedicated
  # C-style escapes.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'
  _encode_transforms[8] = '\\b'
  _encode_transforms[9] = '\\t'
  _encode_transforms[10] = '\\n'
  _encode_transforms[11] = '\\v'
  _encode_transforms[12] = '\\f'
  # Note: CR (13) is deliberately mapped to '\n', making it
  # indistinguishable from LF in the output.  See _EncodeString.
  _encode_transforms[13] = '\\n'

  # The alternate (single-line) rules pass HT, LF, and VT through unescaped.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)

  def __init__(self, properties=None, id=None, parent=None):
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    # Apply schema defaults first so that any properties supplied by the
    # caller override them.
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)

  def __repr__(self):
    # Fall back to a nameless representation for objects that can't supply
    # a Name (see Name below).
    try:
      name = self.Name()
    except NotImplementedError:
      return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """

    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]

      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        # Immutable values can be shared between the copies.
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError('Strong dict for key ' + key + ' in ' + \
                          self.__class__.__name__)
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError('Unexpected type ' + value.__class__.__name__ + \
                        ' for key ' + key + ' in ' + self.__class__.__name__)

    return that

  def Name(self):
    """Return the name corresponding to an object.

    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property.  Override as needed.
    """

    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist.  This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
        ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']

    raise NotImplementedError(self.__class__.__name__ + ' must implement Name')

  def Comment(self):
    """Return a comment string for the object.

    Most objects just use their name as the comment, but PBXProject uses
    different values.

    The returned comment is not escaped and does not have any comment marker
    strings applied to it.
    """

    return self.Name()

  def Hashables(self):
    # The class name always participates in the hash; the name and any
    # values pushed into _hashables follow.
    hashables = [self.__class__.__name__]

    name = self.Name()
    if name != None:
      hashables.append(name)

    hashables.extend(self._hashables)

    return hashables

  def HashablesForChild(self):
    # None means "children should use this object's Hashables()".  Subclasses
    # (such as PBXGroup) override this to feed children a different list.
    return None

  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """

    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """

      hash.update(struct.pack('>i', len(data)))
      hash.update(data)

    if seed_hash is None:
      seed_hash = _new_sha1()

    hash = seed_hash.copy()

    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)

    if recursive:
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        child_hash = hash
      else:
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)

      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)

    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      assert hash.digest_size % 4 == 0
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)

  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID.  Checks all descendants.
    """

    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError(
              'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
              (descendant.id, str(descendant._properties),
               str(other._properties), self._properties['rootObject'].Name()))
      ids[descendant.id] = descendant

  def Children(self):
    """Returns a list of all of this object's owned (strong) children."""

    children = []
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong) = attributes[0:3]
      if is_strong and property in self._properties:
        if not is_list:
          children.append(self._properties[property])
        else:
          children.extend(self._properties[property])
    return children

  def Descendants(self):
    """Returns a list of all of this object's descendants, including this
    object.
    """

    children = self.Children()
    descendants = [self]
    for child in children:
      descendants.extend(child.Descendants())
    return descendants

  def PBXProjectAncestor(self):
    # The base case for recursion is defined at PBXProject.PBXProjectAncestor.
    if self.parent:
      return self.parent.PBXProjectAncestor()
    return None

  def _EncodeComment(self, comment):
    """Encodes a comment to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # This mimics Xcode behavior by wrapping the comment in "/*" and "*/".  If
    # the string already contains a "*/", it is turned into "(*)/".  This keeps
    # the file writer from outputting something that would be treated as the
    # end of a comment in the middle of something intended to be entirely a
    # comment.
    return '/* ' + comment.replace('*/', '(*)/') + ' */'

  def _EncodeTransform(self, match):
    # This function works closely with _EncodeString.  It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
    char = match.group(0)

    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same.  Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]

  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present.  Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #     7 ^G BEL is encoded as "\a"
    #     8 ^H BS  is encoded as "\b"
    #    11 ^K VT  is encoded as "\v"
    #    12 ^L NP  is encoded as "\f"
    #   127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #     9 ^I HT  is passed through as-is without escaping
    #    10 ^J NL  is passed through as-is without escaping
    #    13 ^M CR  is passed through as-is without escaping
    #  - In other objects:
    #     9 ^I HT  is encoded as "\t"
    #    10 ^J NL  is encoded as "\n"
    #    13 ^M CR  is encoded as "\n" rendering it indistinguishable from
    #              10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.

    if _unquoted.search(value) and not _quoted.search(value):
      return value

    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'

  def _XCPrint(self, file, tabs, line):
    # Write line to file, indented by tabs tab characters.
    file.write('\t' * tabs + line)

  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.

    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.

    If flatten_list is True, single-element lists will be transformed into
    strings.
    """

    printable = ''
    comment = None

    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs

    if isinstance(value, XCObject):
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
            sep
      printable += end_tabs + '}'
    else:
      raise TypeError("Can't make " + value.__class__.__name__ + ' printable')

    if comment != None:
      printable += ' ' + self._EncodeComment(comment)

    return printable

  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.

    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """

    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'

    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value

    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False

    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False

    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
          printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise

    # Pass tabs=0 because printable already carries its own leading
    # indentation (built above).
    self._XCPrint(file, 0, printable)

  def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode output
    formatting.
    """

    self.VerifyHasRequiredProperties()

    if self._should_print_single_line:
      # When printing an object in a single line, Xcode doesn't put any space
      # between the beginning of a dictionary (or presumably a list) and the
      # first contained item, so you wind up with snippets like
      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
      # If it were me, I would have put a space in there after the opening
      # curly, but I guess this is just another one of those inconsistencies
      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
      # compared to other objects.  Mimic Xcode's behavior here by using an
      # empty string for sep.
      sep = ''
      end_tabs = 0
    else:
      sep = '\n'
      end_tabs = 2

    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.iteritems()):
      self._XCKVPrint(file, 3, property, value)

    # End the object.
    self._XCPrint(file, end_tabs, '};\n')

  def UpdateProperties(self, properties, do_copy=False):
    """Merge the supplied properties into the _properties dictionary.

    The input properties must adhere to the class schema or a KeyError or
    TypeError exception will be raised.  If adding an object of an XCObject
    subclass and the schema indicates a strong relationship, the object's
    parent will be set to this object.

    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
    strong-owned XCObjects in lists will be copied instead of having their
    references added.
    """

    if properties is None:
      return

    for property, value in properties.iteritems():
      # Make sure the property is in the schema.
      if not property in self._schema:
        raise KeyError(property + ' not in ' + self.__class__.__name__)

      # Make sure the property conforms to the schema.
      (is_list, property_type, is_strong) = self._schema[property][0:3]
      if is_list:
        if value.__class__ != list:
          raise TypeError(
                property + ' of ' + self.__class__.__name__ + \
                ' must be list, not ' + value.__class__.__name__)
        for item in value:
          if not isinstance(item, property_type) and \
             not (item.__class__ == unicode and property_type == str):
            # Accept unicode where str is specified.  str is treated as
            # UTF-8-encoded.
            raise TypeError(
                  'item of ' + property + ' of ' + self.__class__.__name__ + \
                  ' must be ' + property_type.__name__ + ', not ' + \
                  item.__class__.__name__)
      elif not isinstance(value, property_type) and \
           not (value.__class__ == unicode and property_type == str):
        # Accept unicode where str is specified.  str is treated as
        # UTF-8-encoded.
        raise TypeError(
              property + ' of ' + self.__class__.__name__ + ' must be ' + \
              property_type.__name__ + ', not ' + value.__class__.__name__)

      # Checks passed, perform the assignment.
      if do_copy:
        if isinstance(value, XCObject):
          if is_strong:
            self._properties[property] = value.Copy()
          else:
            self._properties[property] = value
        elif isinstance(value, str) or isinstance(value, unicode) or \
             isinstance(value, int):
          self._properties[property] = value
        elif isinstance(value, list):
          if is_strong:
            # If is_strong is True, each element is an XCObject, so it's safe
            # to call Copy.
            self._properties[property] = []
            for item in value:
              self._properties[property].append(item.Copy())
          else:
            self._properties[property] = value[:]
        elif isinstance(value, dict):
          self._properties[property] = value.copy()
        else:
          raise TypeError("Don't know how to copy a " + \
                          value.__class__.__name__ + ' object for ' + \
                          property + ' in ' + self.__class__.__name__)
      else:
        self._properties[property] = value

      # Set up the child's back-reference to this object.  Don't use |value|
      # any more because it may not be right if do_copy is true.
      if is_strong:
        if not is_list:
          self._properties[property].parent = self
        else:
          for item in self._properties[property]:
            item.parent = self

  def HasProperty(self, key):
    """Returns True if key is present in the _properties dictionary."""
    return key in self._properties

  def GetProperty(self, key):
    """Returns the value of property key.  Raises KeyError if unset."""
    return self._properties[key]

  def SetProperty(self, key, value):
    """Sets property key to value, subject to schema validation."""
    self.UpdateProperties({key: value})

  def DelProperty(self, key):
    """Removes property key if it is present; no-op otherwise."""
    if key in self._properties:
      del self._properties[key]

  def AppendProperty(self, key, value):
    # TODO(mark): Support ExtendProperty too (and make this call that)?

    # Schema validation.
    if not key in self._schema:
      raise KeyError(key + ' not in ' + self.__class__.__name__)

    (is_list, property_type, is_strong) = self._schema[key][0:3]
    if not is_list:
      raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
    if not isinstance(value, property_type):
      raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
                      ' must be ' + property_type.__name__ + ', not ' + \
                      value.__class__.__name__)

    # If the property doesn't exist yet, create a new empty list to receive the
    # item.
    if not key in self._properties:
      self._properties[key] = []

    # Set up the ownership link.
    if is_strong:
      value.parent = self

    # Store the item.
    self._properties[key].append(value)

  def VerifyHasRequiredProperties(self):
    """Ensure that all properties identified as required by the schema are
    set.
    """

    # TODO(mark): A stronger verification mechanism is needed.  Some
    # subclasses need to perform validation beyond what the schema can enforce.
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and not property in self._properties:
        raise KeyError(self.__class__.__name__ + ' requires ' + property)

  def _SetDefaultsFromSchema(self):
    """Assign object default values according to the schema.  This will not
    overwrite properties that have already been set."""

    defaults = {}
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong, is_required) = attributes[0:4]
      if is_required and len(attributes) >= 5 and \
          not property in self._properties:
        default = attributes[4]
        defaults[property] = default

    if len(defaults) > 0:
      # Use do_copy=True so that each new object gets its own copy of strong
      # objects, lists, and dicts.
      self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    # Derive a default name from path's basename when no explicit name was
    # supplied and the basename differs from the full path.
    if 'path' in self._properties and not 'name' in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       (not 'sourceTree' in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree != None:
        self._properties['sourceTree'] = source_tree
      if path != None:
        self._properties['path'] = path
      if source_tree != None and path is None and \
         not 'name' in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the name property, falling back to path; None if neither is
    set (as happens for the root PBXGroup)."""
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements except
    for the main group (which has neither a name nor a path) are taken to be
    just the set of path components.  Because hashables are inherited from
    parents, this provides assurance that a/b/f1 has the same set of hashables
    whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """

    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path != None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    """Comparator for sorting siblings: groups sort before files; objects of
    equal rank sort by name.  Raises KeyError for unsupported types."""
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    """Comparator for the direct children of a PBXProject's mainGroup: the
    groups named in |order| come first, in that order; everything else falls
    back to Compare."""
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # BUG FIX: the original tested isinstance(self, PBXGroup) here too — a
    # copy-paste error.  |other_in| must be based on |other|'s type.
    other_in = isinstance(other, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and not other_name in order:
      return -1
    if other_name in order and not self_name in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    """Returns "$(sourceTree)/path" flattened to a single string, or None if
    there is neither a non-"<group>" sourceTree nor a path."""
    # Turn the object's sourceTree and path properties into a single flat
    # string of a form comparable to the path parameter.  If there's a
    # sourceTree property other than "<group>", wrap it in $(...) for the
    # comparison.
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    """Returns a full path to self relative to the project file, or relative
    to some other source tree."""
    # Start with self, and walk up the chain of parents prepending their
    # paths, if any, until no more parents are available (project-relative
    # path) or until a path relative to some source tree is found.
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path != None and path != None:
        path = posixpath.join(this_path, path)
      elif this_path != None:
        path = this_path
      xche = xche.parent

    return path
class PBXGroup(XCHierarchicalElement):
  """A group ("folder") node in the Xcode project hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    # Index any children supplied at construction time so that later
    # additions can detect path collisions.
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable: a node could have two child PBXGroups sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id do
    # not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    """Sets up this PBXGroup object's dicts to reference the child properly.

    Raises:
      ValueError: if another child is already registered under the same path,
          or (for PBXVariantGroup children) under the same (name, path) key.
    """
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' + \
                         'name ' + str(child_name) + ' and path ' + \
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    """Appends child to this group, keeping the lookup dicts in sync.

    Callers should use this instead of calling
    AppendProperty('children', child) directly because this function
    maintains the group's dicts.
    """
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() equals name, or None.

    This is not currently optimized with a dict as GetChildByPath is because
    it has few callers.  Most callers probably want GetChildByPath.  This
    function is only useful to get children that have names but no paths,
    which is rare.  The children of the main group ("Source", "Products",
    etc.) is pretty much the only case where this is likely to come up.
    """
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child indexed under path in _children_by_path, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    """Returns this group's PBXReferenceProxy child proxying remote_object.

    This method is a little bit esoteric.  Given a remote_object, which
    should be a PBXFileReference in another project file, this method will
    return this group's PBXReferenceProxy object serving as a local proxy
    for the remote PBXFileReference.  Returns None when no such proxy exists.

    This function might benefit from a dict optimization as GetChildByPath
    for some workloads, but profiling shows that it's not currently a
    problem.
    """
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a directory
    with an ".lproj" extension, this method returns a PBXVariantGroup
    containing the variant named by path, and possibly other variants.  For
    all other paths, a "normal" PBXFileReference will be returned.
    """
    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    # Normalize the path, then restore the trailing slash that normpath
    # strips, because it distinguishes folder references.
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten from
      # this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is already
    present as a child of this object, it is returned.  Otherwise, a new
    PBXVariantGroup with the correct properties is created, added as a child,
    and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those children
    are groups.  If there are three PBXGroups representing a, b, and c, with
    c inside b and b inside a, and a and b have no other children, this will
    result in a taking over both b and c, forming a PBXGroup for a/b/c.

    If recurse is True, this function will recurse into children and ask them
    to collapse themselves by taking over only children as well.  Assuming
    an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
    (d1, d2, and f are files, the rest are groups), recursion will result in
    a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path
      # Take over the child's variant-group lookup dict along with its
      # children.  Keeping this object's old dict would leave
      # _variant_children_by_name_and_path out of sync with the new children,
      # causing AddOrGetVariantGroupByNameAndPath to miss the inherited
      # variant groups and attempt to re-add them.
      self._variant_children_by_name_and_path = \
          child._variant_children_by_name_and_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the original
      # parent didn't have a name but the child did, let the child's name
      # live on.  If the name attribute seems unnecessary now, get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    # Sort this group's own children, then descend into child groups.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object calls this to obtain hashables
    beyond this object's own: on their own they are not specific and unique
    enough (they lack the ancestor context), so the hashables of every
    ancestor XCHierarchicalElement are accumulated in front of them.
    """
    hashables = []
    node = self
    # Walk up the parent chain; isinstance(None, ...) is False, so the walk
    # stops past the topmost XCHierarchicalElement.
    while isinstance(node, XCHierarchicalElement):
      hashables = node.Hashables() + hashables
      node = node.parent
    return hashables
class XCContainerPortal(XCObject):
  """Marker abstract base for objects that can be used as the containerPortal
  property of PBXContainerItemProxy.
  """
  pass
class XCRemoteObject(XCObject):
  """Marker abstract base for objects that can be used as the
  remoteGlobalIDString property of PBXContainerItemProxy.
  """
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file (or folder) on disk.

  On construction, if no explicit or last-known file type was supplied, one
  is inferred from the path's extension.
  """

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType': [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)

    # A trailing slash marks a folder reference; strip it but remember.
    is_dir = False
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True

    if 'path' not in self._properties or \
       'lastKnownFileType' in self._properties or \
       'explicitFileType' in self._properties:
      # Nothing to infer: either there is no path, or a file type was
      # provided explicitly.
      return

    # TODO(mark): This is the replacement for a replacement for a quick hack.
    # It is no longer incredibly sucky, but this list needs to be extended.
    ext_to_filetype = {
      'a': 'archive.ar',
      'app': 'wrapper.application',
      'bdic': 'file',
      'bundle': 'wrapper.cfbundle',
      'c': 'sourcecode.c.c',
      'cc': 'sourcecode.cpp.cpp',
      'cpp': 'sourcecode.cpp.cpp',
      'css': 'text.css',
      'cxx': 'sourcecode.cpp.cpp',
      'dart': 'sourcecode',
      'dylib': 'compiled.mach-o.dylib',
      'framework': 'wrapper.framework',
      'gyp': 'sourcecode',
      'gypi': 'sourcecode',
      'h': 'sourcecode.c.h',
      'hxx': 'sourcecode.cpp.h',
      'icns': 'image.icns',
      'java': 'sourcecode.java',
      'js': 'sourcecode.javascript',
      'kext': 'wrapper.kext',
      'm': 'sourcecode.c.objc',
      'mm': 'sourcecode.cpp.objcpp',
      'nib': 'wrapper.nib',
      'o': 'compiled.mach-o.objfile',
      'pdf': 'image.pdf',
      'pl': 'text.script.perl',
      'plist': 'text.plist.xml',
      'pm': 'text.script.perl',
      'png': 'image.png',
      'py': 'text.script.python',
      'r': 'sourcecode.rez',
      'rez': 'sourcecode.rez',
      's': 'sourcecode.asm',
      'storyboard': 'file.storyboard',
      'strings': 'text.plist.strings',
      'swift': 'sourcecode.swift',
      'ttf': 'file',
      'xcassets': 'folder.assetcatalog',
      'xcconfig': 'text.xcconfig',
      'xcdatamodel': 'wrapper.xcdatamodel',
      'xcdatamodeld': 'wrapper.xcdatamodeld',
      'xib': 'file.xib',
      'y': 'sourcecode.yacc',
    }

    # Extensions mapped here use explicitFileType; all others use
    # lastKnownFileType.
    ext_to_prop_name = {
      'dart': 'explicitFileType',
      'gyp': 'explicitFileType',
      'gypi': 'explicitFileType',
    }

    if is_dir:
      self._properties['lastKnownFileType'] = 'folder'
      return

    basename = posixpath.basename(self._properties['path'])
    ext = posixpath.splitext(basename)[1]
    # Look up using a lowercase extension without the leading dot.
    # TODO(mark): Maybe it should try with the original case first and fall
    # back to lowercase, in case there are any instances where case
    # matters.  There currently aren't.
    if ext != '':
      ext = ext[1:].lower()

    # TODO(mark): "text" is the default value, but "file" is appropriate
    # for unrecognized files not containing text.  Xcode seems to choose
    # based on content.
    file_type = ext_to_filetype.get(ext, 'text')
    prop_name = ext_to_prop_name.get(ext, 'lastKnownFileType')
    self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations.

  Its children are the per-language variants of a single logical file (see
  PBXGroup.AddOrGetFileByPath for how .lproj paths map onto variant groups).
  """
  # No additions to the schema relative to PBXGroup.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A named build configuration together with its buildSettings dict."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings': [0, dict, 0, 1, {}],
    'name': [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns whether key is present in this configuration's settings."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value stored for key; raises KeyError when unset."""
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Sets the build setting key to value."""
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    """Appends value to the list-valued setting key, creating it if needed."""
    self._properties['buildSettings'].setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes key from the settings; no-op if it is absent."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Sets the baseConfigurationReference property to value."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  """An ordered list of XCBuildConfiguration objects plus a default name."""

  # _configs is the default list of configurations.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName': [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    return 'Build configuration list for %s "%s"' % \
           (self.parent.__class__.__name__, self.parent.Name())

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name."""
    for config in self._properties['buildConfigurations']:
      if config._properties['name'] == name:
        return config
    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is
    the same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build settings,
    or if any children have different values for the key, returns -1.
    """
    seen_has = None
    seen_value = None
    for config in self._properties['buildConfigurations']:
      config_has = config.HasBuildSetting(key)
      if seen_has is None:
        seen_has = config_has
      elif seen_has != config_has:
        # Mixed presence across configurations.
        return -1

      if config_has:
        config_value = config.GetBuildSetting(key)
        if seen_value is None:
          seen_value = config_value
        elif seen_value != config_value:
          # Same key, differing values.
          return -1

    return 1 if seen_has else 0

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)
    value = None
    for config in self._properties['buildConfigurations']:
      config_value = config.GetBuildSetting(key)
      if value is None:
        value = config_value
      elif value != config_value:
        raise ValueError('Variant values for ' + key)

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for config in self._properties['buildConfigurations']:
      config.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a list,
    in all child XCBuildConfiguration objects.
    """
    for config in self._properties['buildConfigurations']:
      config.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for config in self._properties['buildConfigurations']:
      config.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the base configuration in all child XCBuildConfiguration objects.
    """
    for config in self._properties['buildConfigurations']:
      config.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Wraps an XCFileLikeElement for inclusion in a build phase's file list."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef': [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    # Example: "main.cc in Sources"
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Name() alone is not a complete pathname, so append the fileRef's
    # path-derived hashables instead.  These are unique enough that no two
    # PBXBuildFiles should wind up with the same set of hashables, unless
    # someone adds the same file multiple times to the same target (which
    # would be considered invalid anyway).
    return hashables + self._properties['fileRef'].PathHashables()
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildActionMask': [0, int, 0, 1, 0x7fffffff],
    'files': [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)
    self._files_by_path = {}
    self._files_by_xcfilelikeelement = {}
    # Index any files supplied at construction time so later additions can
    # detect duplicates.
    for pbxbuildfile in self._properties.get('files', []):
      self._AddBuildFileToDicts(pbxbuildfile)

  def FileGroup(self, path):
    # Subclasses must override this by returning a two-element tuple.  The
    # first item in the tuple should be the PBXGroup to which "path" should be
    # added, either as a child or deeper descendant.  The second item should
    # be a boolean indicating whether files should be added into hierarchical
    # groups or one single flat group.
    raise NotImplementedError(
        self.__class__.__name__ + ' must implement FileGroup')

  def _AddPathToDict(self, pbxbuildfile, path):
    """Adds path to the dict tracking paths belonging to this build phase.

    If the path is already a member of this build phase, raises an exception.
    """
    if path in self._files_by_path:
      raise ValueError('Found multiple build files with path ' + path)
    # It's possible for a PBXBuildFile to be in the dict under multiple paths
    # (a PBXVariantGroup with several variants), so this mapping is
    # many-paths-to-one-file.
    self._files_by_path[path] = pbxbuildfile

  def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
    """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

    If path is specified, then it is the path that is being added to the
    phase, and pbxbuildfile must contain either a PBXFileReference directly
    referencing that path, or it must contain a PBXVariantGroup that itself
    contains a PBXFileReference referencing the path.

    If path is not specified, either the PBXFileReference's path or the paths
    of all children of the PBXVariantGroup are taken as being added to the
    phase.

    If the path is already present in the phase, raises an exception.

    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    are already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappear and are referenced by the
    same PBXBuildFile that has already introduced them, because in the case
    of PBXVariantGroup objects, they may correspond to multiple paths that are
    not all added simultaneously.  When this situation occurs, the path needs
    to be added to _files_by_path, but nothing needs to change in
    _files_by_xcfilelikeelement, and the caller should have avoided adding
    the PBXBuildFile if it is already present in the list of children.
    """
    xcfilelikeelement = pbxbuildfile._properties['fileRef']

    paths = []
    if path != None:
      # It's best when the caller provides the path.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        paths.append(path)
      # NOTE(review): when path is provided but the fileRef is NOT a
      # PBXVariantGroup, nothing is appended here, so that path is never
      # recorded in _files_by_path — confirm this asymmetry is intentional.
    else:
      # If the caller didn't provide a path, there can be either multiple
      # paths (PBXVariantGroup) or one.
      if isinstance(xcfilelikeelement, PBXVariantGroup):
        for variant in xcfilelikeelement._properties['children']:
          paths.append(variant.FullPath())
      else:
        paths.append(xcfilelikeelement.FullPath())

    # Add the paths first, because if something's going to raise, the
    # messages provided by _AddPathToDict are more useful owing to its
    # having access to a real pathname and not just an object's Name().
    for a_path in paths:
      self._AddPathToDict(pbxbuildfile, a_path)

    # If another PBXBuildFile references this XCFileLikeElement, there's a
    # problem.
    if xcfilelikeelement in self._files_by_xcfilelikeelement and \
       self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
      raise ValueError('Found multiple build files for ' + \
                       xcfilelikeelement.Name())
    self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile

  def AppendBuildFile(self, pbxbuildfile, path=None):
    """Appends pbxbuildfile to this phase's files list, keeping the lookup
    dicts in sync.

    Callers should use this instead of calling
    AppendProperty('files', pbxbuildfile) directly because this function
    maintains the object's dicts.  Better yet, callers can just call AddFile
    with a pathname and not worry about building their own PBXBuildFile
    objects.
    """
    self.AppendProperty('files', pbxbuildfile)
    self._AddBuildFileToDicts(pbxbuildfile, path)

  def AddFile(self, path, settings=None):
    """Adds the file at path to this phase, creating or reusing the group
    structure and PBXBuildFile wrapper as needed.
    """
    (file_group, hierarchical) = self.FileGroup(path)
    file_ref = file_group.AddOrGetFileByPath(path, hierarchical)

    if file_ref in self._files_by_xcfilelikeelement and \
       isinstance(file_ref, PBXVariantGroup):
      # There's already a PBXBuildFile in this phase corresponding to the
      # PBXVariantGroup.  path just provides a new variant that belongs to
      # the group.  Add the path to the dict.
      pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
      self._AddBuildFileToDicts(pbxbuildfile, path)
    else:
      # Add a new PBXBuildFile to get file_ref into the phase.
      if settings is None:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
      else:
        pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
      self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the fixed display name of this build phase."""
    return 'Headers'

  def FileGroup(self, path):
    """Delegates to the project's RootGroupForPath, which supplies the
    (group, hierarchical) tuple that FileGroup must return."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the fixed display name of this build phase."""
    return 'Resources'

  def FileGroup(self, path):
    """Delegates to the project's RootGroupForPath, which supplies the
    (group, hierarchical) tuple that FileGroup must return."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the fixed display name of this build phase."""
    return 'Sources'

  def FileGroup(self, path):
    """Delegates to the project's RootGroupForPath, which supplies the
    (group, hierarchical) tuple that FileGroup must return."""
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  # No additions to the schema relative to XCBuildPhase.

  def Name(self):
    """Returns the fixed display name of this build phase."""
    return 'Frameworks'

  def FileGroup(self, path):
    """Routes path to the Frameworks group, except for .o files which are
    redirected to the project's root group."""
    ext = posixpath.splitext(path)[1]
    if ext != '':
      ext = ext[1:].lower()
    if ext == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually aren't
      # frameworks, they're more like sources or intermediates.  Redirect them
      # to show up in one of those other groups.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """A build phase that runs a shell script."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths': [1, str, 0, 1, []],
    'name': [0, str, 0, 0],
    'outputPaths': [1, str, 0, 1, []],
    'shellPath': [0, str, 0, 1, '/bin/sh'],
    'shellScript': [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    """Returns the user-supplied name if set, otherwise 'ShellScript'."""
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """A build phase that copies files to a destination directory."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath': [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name': [0, str, 0, 0],
  })

  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')

  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_FRAMEWORKS_DIR': 10,  # Frameworks Directory
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    """Returns the user-supplied name if set, otherwise 'CopyFiles'."""
    return self._properties.get('name', 'CopyFiles')

  def FileGroup(self, path):
    """Delegates to the project's RootGroupForPath, which supplies the
    (group, hierarchical) tuple that FileGroup must return."""
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for XCHierarchicalElements,
    specifically, "$(DIR)/path".
    """
    match = self.path_tree_re.search(path)
    if match:
      # The path is anchored to an Xcode variable, "$(DIR)" or "$(DIR)/path".
      path_tree = match.group(1)
      relative_path = match.group(3)
      if path_tree in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[path_tree]
        if relative_path is None:
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable name like
        # $(SRCROOT).  Xcode will still handle this as an "absolute path"
        # that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))

    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule describing how to build files of a given type."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType': [0, str, 0, 1],
    'isEditable': [0, int, 0, 1, 1],
    'outputFiles': [1, str, 0, 1, []],
    'script': [0, str, 0, 0],
  })

  def Name(self):
    """Returns the class name; not very inspired, but it's what Xcode uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super, plus the identifying weak properties this rule refers to.
    hashables = XCObject.Hashables(self) + [self._properties['fileType']]
    if 'filePatterns' in self._properties:
      hashables.append(self._properties['filePatterns'])
    return hashables
class PBXContainerItemProxy(XCObject):
  """A proxy for an object in this or another project file.

  When referencing an item in this project file, containerPortal is the
  PBXProject root object of this project file.  When referencing an item in
  another project file, containerPortal is a PBXFileReference identifying
  the other project file.

  When serving as a proxy to an XCTarget (in this project file or another),
  proxyType is 1.  When serving as a proxy to a PBXFileReference (in another
  project file), proxyType is 2.  Type 2 is used for references to the
  products of the other project file's targets.

  Xcode is weird about remoteGlobalIDString.  Usually, it's printed without
  a comment, indicating that it's tracked internally simply as a string, but
  sometimes it's printed with a comment (usually when the object is initially
  created), indicating that it's tracked as a project file object at least
  sometimes.  This module always tracks it as an object, but contains a hack
  to prevent it from printing the comment in the project file output.  See
  _XCKVPrint.
  """

  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal': [0, XCContainerPortal, 0, 1],
    'proxyType': [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo': [0, str, 0, 1],
  })

  def __repr__(self):
    portal_name = self._properties['containerPortal'].Name()
    remote_info = self._properties['remoteInfo']
    name = '%s.gyp:%s' % (portal_name, remote_info)
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    """Returns the class name; admittedly not the best, but it's what Xcode
    uses."""
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Include the hashables of the weak objects this proxy refers to,
    # containerPortal first to preserve ordering.
    for prop in ('containerPortal', 'remoteGlobalIDString'):
      hashables.extend(self._properties[prop].Hashables())
    return hashables
class PBXTargetDependency(XCObject):
  # The "target" property should be typed as XCTarget (NoneType is obviously
  # wrong), but XCTarget is defined later in this file and its own schema
  # refers back to PBXTargetDependency, so the two classes form a cycle that
  # Python's lack of forward declarations cannot express directly.  The
  # schema entry is therefore created with NoneType here and patched to
  # XCTarget immediately after XCTarget is defined.
  #
  # At least one of "name" and "target" must be supplied.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name':        [0, str,                   0, 0],
    'target':      [0, None.__class__,        0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    # Fall back to the target's name when no (truthy) "name" property is set.
    name = self._properties.get('name')
    if not name:
      name = self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Not a great name, but it matches what Xcode itself uses.
    return self.__class__.__name__

  def Hashables(self):
    hashables = XCObject.Hashables(self)  # super

    # Fold in the hashables of the (weak) targetProxy so that distinct
    # dependencies hash differently.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables
# A PBXReferenceProxy stands in for a file — typically the product of a
# target — that lives in another project file, reached through the
# PBXContainerItemProxy in "remoteRef".
class PBXReferenceProxy(XCFileLikeElement):
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType':  [0, str,                   0, 1],
    'path':      [0, str,                   0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject; the XCRemoteObject superclass
  # exists only so that targets may appear in the remoteGlobalIDString
  # property of PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases':            [1, XCBuildPhase,        1, 1, []],
    'dependencies':           [1, PBXTargetDependency, 1, 1, []],
    'name':                   [0, str,                 0, 1],
    'productName':            [0, str,                 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target and derives "productName" defaults.

    The force_* arguments are accepted for signature compatibility with
    PBXNativeTarget and are unused here.
    """
    # super
    XCRemoteObject.__init__(self, properties, id, parent)

    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also
    # set the "PRODUCT_NAME" build setting in each configuration, but only
    # if the setting is not present in any build configuration.
    if 'name' in self._properties and \
       'productName' not in self._properties:
      self.SetProperty('productName', self._properties['name'])

    if 'productName' in self._properties and \
       'buildConfigurationList' in self._properties:
      configs = self._properties['buildConfigurationList']
      # HasBuildSetting returns 0 only when no configuration defines the
      # setting; in that case, propagate productName to all of them.
      if configs.HasBuildSetting('PRODUCT_NAME') == 0:
        configs.SetBuildSetting('PRODUCT_NAME',
                                self._properties['productName'])

  def AddDependency(self, other):
    """Adds a dependency on another XCTarget, which may live in this project
    file or in a different one."""
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # A dependency on another target in the same project file.  The
      # proxy's containerPortal is this file's root PBXProject object.
      container = PBXContainerItemProxy({'containerPortal':      pbxproject,
                                         'proxyType':            1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo':           other.Name()})
      dependency = PBXTargetDependency({'target':      other,
                                        'targetProxy': container})
    else:
      # A dependency on a target in a different project file.  The proxy's
      # containerPortal is the PBXFileReference identifying that file.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
            'containerPortal':      other_project_ref,
            'proxyType':            1,
            'remoteGlobalIDString': other,
            'remoteInfo':           other.Name(),
          })
      dependency = PBXTargetDependency({'name':        other.Name(),
                                        'targetProxy': container})
    self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.

  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key,
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key,
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Now that XCTarget exists, patch the type of PBXTargetDependency's "target"
# property, which had to be declared as NoneType above to break the circular
# definition between the two classes.  See PBXTargetDependency._schema.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  # buildPhases is overridden in the schema to be able to set defaults.
  #
  # NOTE: Contrary to most objects, it is advisable to set parent when
  # constructing PBXNativeTarget.  A parent of an XCTarget must be a
  # PBXProject object.  A parent reference is required for a PBXNativeTarget
  # during construction to be able to set up the target defaults for
  # productReference, because a PBXBuildFile object must be created for the
  # target and it must be added to the PBXProject's mainGroup hierarchy.
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases':      [1, XCBuildPhase,     1, 1,
                         [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules':       [1, PBXBuildRule,     1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType':      [0, str,              0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application':          ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.application.watchapp': ['wrapper.application',
                                                    '', '.app'],
    'com.apple.product-type.watchkit-extension':   ['wrapper.app-extension',
                                                    '', '.appex'],
    'com.apple.product-type.app-extension':        ['wrapper.app-extension',
                                                    '', '.appex'],
    'com.apple.product-type.bundle':               ['wrapper.cfbundle',
                                                    '', '.bundle'],
    'com.apple.product-type.framework':            ['wrapper.framework',
                                                    '', '.framework'],
    'com.apple.product-type.library.dynamic':      ['compiled.mach-o.dylib',
                                                    'lib', '.dylib'],
    'com.apple.product-type.library.static':       ['archive.ar',
                                                    'lib', '.a'],
    'com.apple.product-type.tool':                 ['compiled.mach-o.executable',
                                                    '', ''],
    'com.apple.product-type.bundle.unit-test':     ['wrapper.cfbundle',
                                                    '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle':             ['compiled.mach-o.dylib',
                                                    '', '.so'],
    'com.apple.product-type.kernel-extension':     ['wrapper.kext',
                                                    '', '.kext'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target and, when enough information is available,
    creates the productReference PBXFileReference in the parent project's
    Products group.

    force_outdir/force_prefix/force_extension override the output directory,
    file-name prefix, and file-name extension derived from productType.
    """
    # super
    XCTarget.__init__(self, properties, id, parent)

    # The product reference can only be derived when productName and a known
    # productType are present, no explicit productReference was supplied, and
    # a parent PBXProject (for the Products group) is reachable.
    if 'productName' in self._properties and \
       'productType' in self._properties and \
       not 'productReference' in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper).  GYP allows such
        # modules to be specified by setting a target type to loadable_module
        # without having mac_bundle set.  These are mapped to the
        # pseudo-product type com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file type
        # (as opposed to mh_dylib).  In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared.  They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules in
        # bundle wrappers (com.apple.product-type.bundle).  .so seems an odd
        # choice because it's used as the extension on many other systems
        # that don't distinguish between linkable shared libraries and
        # non-linkable loadable modules, but there's precedent: Python
        # loadable modules on Mac OS X use an .so extension.
        if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        # NOTE(review): this literal ('com.apple.product-type-bundle.unit.test')
        # does not match the _product_filetypes key spelled
        # 'com.apple.product-type.bundle.unit-test' above, so this branch
        # appears to be unreachable as written — confirm before relying on it.
        if self._properties['productType'] == \
           'com.apple.product-type-bundle.unit.test':
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
          # Extension override.
          suffix = '.' + force_extension
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          # For executables, the extension is folded into the product name
          # itself rather than appended as a separate suffix.
          if filetype.startswith('compiled.mach-o.executable'):
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however there
        # are exceptions.  If a "BSD Dynamic Library" target is added in the
        # Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check duplicates that
        # behavior.
        if force_prefix is not None:
          prefix = force_prefix
        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These targets
        # really should just add a product_name setting that doesn't include
        # the prefix.  For example:
        #  target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex':   0,
          'path':             prefix + product_name + suffix,
          'sourceTree':       'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Returns the sole build phase of the given class, or None.

    Asserts if more than one phase of that class is present.
    """
    if not 'buildPhases' in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is intended as an aid to the *Phase
        # helpers below.  Loop over the entire list of phases and assert if
        # more than one of the desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    # Returns the existing PBXHeadersBuildPhase, creating and inserting one
    # if necessary.
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXResourcesBuildPhase) or \
           isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      # Inserted directly (not via AppendProperty), so parent must be set
      # by hand.
      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    # Returns the existing PBXResourcesBuildPhase, creating and inserting
    # one if necessary.
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index in xrange(0, len(self._properties['buildPhases'])):
        phase = self._properties['buildPhases'][index]
        if isinstance(phase, PBXSourcesBuildPhase) or \
           isinstance(phase, PBXFrameworksBuildPhase):
          insert_at = index
          break

      # Inserted directly (not via AppendProperty), so parent must be set
      # by hand.
      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    # Returns the existing PBXSourcesBuildPhase, creating and appending one
    # if necessary.
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    # Returns the existing PBXFrameworksBuildPhase, creating and appending
    # one if necessary.
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Adds a dependency on another target and, for linkable dependees
    (static libraries always; dynamic libraries and frameworks unless built
    as mh_bundle), also links against the dependee's product."""
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # A cross-project dependency links against the PBXReferenceProxy for
        # the product, not against the remote file reference itself.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
  # An aggregate target produces no product of its own; it exists only to
  # group build phases and dependencies.  All behavior is inherited from
  # XCTarget.
  pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject, the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
  """
  Attributes:
    path: "sample.xcodeproj".  TODO(mark) Document me!
    _other_pbxprojects: A dictionary, keyed by other PBXProject objects.
                        Each value is a reference to the dict in the
                        projectReferences list associated with the keyed
                        PBXProject.
  """

  _schema = XCContainerPortal._schema.copy()
  _schema.update({
    'attributes':             [0, dict,                0, 0],
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'compatibilityVersion':   [0, str,                 0, 1, 'Xcode 3.2'],
    'hasScannedForEncodings': [0, int,                 0, 1, 1],
    'mainGroup':              [0, PBXGroup,            1, 1, PBXGroup()],
    'projectDirPath':         [0, str,                 0, 1, ''],
    'projectReferences':      [1, dict,                0, 0],
    'projectRoot':            [0, str,                 0, 1, ''],
    'targets':                [1, XCTarget,            1, 1, []],
  })

  def __init__(self, properties=None, id=None, parent=None, path=None):
    # path must be assigned before the superclass __init__ runs, because
    # hashing during initialization may consult Name(), which reads it.
    self.path = path
    self._other_pbxprojects = {}
    # super
    return XCContainerPortal.__init__(self, properties, id, parent)

  def Name(self):
    # Returns the project's name: the path's basename without any
    # ".xcodeproj" extension.  Assumes self.path was provided at
    # construction — TODO confirm callers never leave it None.
    name = self.path
    if name[-10:] == '.xcodeproj':
      name = name[:-10]
    return posixpath.basename(name)

  def Path(self):
    return self.path

  def Comment(self):
    return 'Project object'

  def Children(self):
    # super
    children = XCContainerPortal.Children(self)

    # Add children that the schema doesn't know about.  Maybe there's a more
    # elegant way around this, but this is the only case where we need to own
    # objects in a dictionary (that is itself in a list), and three lines for
    # a one-off isn't that big a deal.
    if 'projectReferences' in self._properties:
      for reference in self._properties['projectReferences']:
        children.append(reference['ProductGroup'])

    return children

  def PBXProjectAncestor(self):
    return self

  def _GroupByName(self, name):
    # Returns the direct child PBXGroup of mainGroup with the given name,
    # creating it (and mainGroup itself) on demand.
    if not 'mainGroup' in self._properties:
      self.SetProperty('mainGroup', PBXGroup())

    main_group = self._properties['mainGroup']
    group = main_group.GetChildByName(name)
    if group is None:
      group = PBXGroup({'name': name})
      main_group.AppendChild(group)

    return group

  # SourceGroup and ProductsGroup are created by default in Xcode's own
  # templates.
  def SourceGroup(self):
    return self._GroupByName('Source')

  def ProductsGroup(self):
    return self._GroupByName('Products')

  # IntermediatesGroup is used to collect source-like files that are
  # generated by rules or script phases and are placed in intermediate
  # directories such as DerivedSources.
  def IntermediatesGroup(self):
    return self._GroupByName('Intermediates')

  # FrameworksGroup and ProjectsGroup are top-level groups used to collect
  # frameworks and projects.
  def FrameworksGroup(self):
    return self._GroupByName('Frameworks')

  def ProjectsGroup(self):
    return self._GroupByName('Projects')

  def RootGroupForPath(self, path):
    """Returns a PBXGroup child of this object to which path should be added.

    This method is intended to choose between SourceGroup and
    IntermediatesGroup on the basis of whether path is present in a source
    directory or an intermediates directory.  For the purposes of this
    determination, any path located within a derived file directory such as
    PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
    directory.

    The returned value is a two-element tuple.  The first element is the
    PBXGroup, and the second element specifies whether that group should be
    organized hierarchically (True) or as a single flat list (False).
    """
    # TODO(mark): make this a class variable and bind to self on call?
    # Also, this list is nowhere near exhaustive.
    # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
    # gyp.generator.xcode.  There should probably be some way for that module
    # to push the names in, rather than having to hard-code them here.
    source_tree_groups = {
      'DERIVED_FILE_DIR':         (self.IntermediatesGroup, True),
      'INTERMEDIATE_DIR':         (self.IntermediatesGroup, True),
      'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
      'SHARED_INTERMEDIATE_DIR':  (self.IntermediatesGroup, True),
    }

    (source_tree, path) = SourceTreeAndPathFromPath(path)
    if source_tree != None and source_tree in source_tree_groups:
      (group_func, hierarchical) = source_tree_groups[source_tree]
      group = group_func()
      return (group, hierarchical)

    # TODO(mark): make additional choices based on file extension.

    return (self.SourceGroup(), True)

  def AddOrGetFileInRootGroup(self, path):
    """Returns a PBXFileReference corresponding to path in the correct group
    according to RootGroupForPath's heuristics.

    If an existing PBXFileReference for path exists, it will be returned.
    Otherwise, one will be created and returned.
    """
    (group, hierarchical) = self.RootGroupForPath(path)
    return group.AddOrGetFileByPath(path, hierarchical)

  def RootGroupsTakeOverOnlyChildren(self, recurse=False):
    """Calls TakeOverOnlyChild for all groups in the main group."""
    for group in self._properties['mainGroup']._properties['children']:
      if isinstance(group, PBXGroup):
        group.TakeOverOnlyChild(recurse)

  def SortGroups(self):
    # Sort the children of the mainGroup (like "Source" and "Products")
    # according to their defined order.  (Python 2 sorted() with cmp=.)
    self._properties['mainGroup']._properties['children'] = \
        sorted(self._properties['mainGroup']._properties['children'],
               cmp=lambda x,y: x.CompareRootGroup(y))

    # Sort everything else by putting group before files, and going
    # alphabetically by name within sections of groups and files.  SortGroup
    # is recursive.
    for group in self._properties['mainGroup']._properties['children']:
      if not isinstance(group, PBXGroup):
        continue

      if group.Name() == 'Products':
        # The Products group is a special case.  Instead of sorting
        # alphabetically, sort things in the order of the targets that
        # produce the products.  To do this, just build up a new list of
        # products based on the targets.
        products = []
        for target in self._properties['targets']:
          if not isinstance(target, PBXNativeTarget):
            continue
          product = target._properties['productReference']
          # Make sure that the product is already in the products group.
          assert product in group._properties['children']
          products.append(product)

        # Make sure that this process doesn't miss anything that was already
        # in the products group.
        assert len(products) == len(group._properties['children'])
        group._properties['children'] = products
      else:
        group.SortGroup()

  def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to
    this one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object
    in this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef
    is a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup
    will still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
      self._properties['projectReferences'] = []

    product_group = None
    project_ref = None

    if not other_pbxproject in self._other_pbxprojects:
      # This project file isn't yet linked to the other one.  Establish the
      # link.
      product_group = PBXGroup({'name': 'Products'})

      # ProductGroup is strong.
      product_group.parent = self

      # There's nothing unique about this PBXGroup, and if left alone, it
      # will wind up with the same set of hashables as all other PBXGroup
      # objects owned by the projectReferences list.  Add the hashables of
      # the remote PBXProject that it's related to.
      product_group._hashables.extend(other_pbxproject.Hashables())

      # The other project reports its path as relative to the same directory
      # that this project's path is relative to.  The other project's path
      # is not necessarily already relative to this project.  Figure out the
      # pathname that this project needs to use to refer to the other one.
      this_path = posixpath.dirname(self.Path())
      projectDirPath = self.GetProperty('projectDirPath')
      if projectDirPath:
        # NOTE: isabs is applied to projectDirPath[0] — the first character
        # only — so this is effectively a test for a leading '/'.
        if posixpath.isabs(projectDirPath[0]):
          this_path = projectDirPath
        else:
          this_path = posixpath.join(this_path, projectDirPath)
      other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)

      # ProjectRef is weak (it's owned by the mainGroup hierarchy).
      project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path':              other_path,
            'sourceTree':        'SOURCE_ROOT',
          })
      self.ProjectsGroup().AppendChild(project_ref)

      ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
      self._other_pbxprojects[other_pbxproject] = ref_dict
      self.AppendProperty('projectReferences', ref_dict)

      # Xcode seems to sort this list case-insensitively
      self._properties['projectReferences'] = \
          sorted(self._properties['projectReferences'], cmp=lambda x,y:
                 cmp(x['ProjectRef'].Name().lower(),
                     y['ProjectRef'].Name().lower()))
    else:
      # The link already exists.  Pull out the relevant data.
      project_ref_dict = self._other_pbxprojects[other_pbxproject]
      product_group = project_ref_dict['ProductGroup']
      project_ref = project_ref_dict['ProjectRef']

    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)

    # When every target in the other project has a unique SYMROOT, mix the
    # reference path into the ProductGroup's hashables so its ID is distinct.
    inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
    targets = other_pbxproject.GetProperty('targets')
    if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
      dir_path = project_ref._properties['path']
      product_group._hashables.extend(dir_path)

    return [product_group, project_ref]

  def _AllSymrootsUnique(self, target, inherit_unique_symroot):
    # Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides, if a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent, if it
    # doesn't define an explicit value for 'SYMROOT'.
    symroots = self._DefinedSymroots(target)
    for s in self._DefinedSymroots(target):
      if (s is not None and not self._IsUniqueSymrootForTarget(s) or
          s is None and not inherit_unique_symroot):
        return False
    # An empty set means no configuration defined SYMROOT; defer to the
    # inherited answer.
    return True if symroots else inherit_unique_symroot

  def _DefinedSymroots(self, target):
    # Returns all values for the 'SYMROOT' attribute defined in all
    # configurations for this target.  If any configuration doesn't define
    # the 'SYMROOT' attribute, None is added to the returned set.  If all
    # configurations don't define the 'SYMROOT' attribute, an empty set is
    # returned.
    config_list = target.GetProperty('buildConfigurationList')
    symroots = set()
    for config in config_list.GetProperty('buildConfigurations'):
      setting = config.GetProperty('buildSettings')
      if 'SYMROOT' in setting:
        symroots.add(setting['SYMROOT'])
      else:
        symroots.add(None)
    if len(symroots) == 1 and None in symroots:
      return set()
    return symroots

  def _IsUniqueSymrootForTarget(self, symroot):
    # This method returns True if all configurations in target contain a
    # 'SYMROOT' attribute that is unique for the given target.  A value is
    # unique, if the Xcode macro '$SRCROOT' appears in it in any form.
    uniquifier = ['$SRCROOT', '$(SRCROOT)']
    if any(x in symroot for x in uniquifier):
      return True
    return False

  def _SetUpProductReferences(self, other_pbxproject, product_group,
                              project_ref):
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
      if not isinstance(target, PBXNativeTarget):
        continue

      other_fileref = target._properties['productReference']
      if product_group.GetChildByRemoteObject(other_fileref) is None:
        # Xcode sets remoteInfo to the name of the target and not the name
        # of its product, despite this proxy being a reference to the
        # product.
        container_item = PBXContainerItemProxy({
              'containerPortal':      project_ref,
              'proxyType':            2,
              'remoteGlobalIDString': other_fileref,
              'remoteInfo':           target.Name()
            })
        # TODO(mark): Does sourceTree get copied straight over from the
        # other project?  Can the other project ever have lastKnownFileType
        # here instead of explicitFileType?  (Use it if so?)  Can path ever
        # be unset?  (I don't think so.)  Can other_fileref have name set,
        # and does it impact the PBXReferenceProxy if so?  These are the
        # questions that perhaps will be answered one day.
        reference_proxy = PBXReferenceProxy({
              'fileType':   other_fileref._properties['explicitFileType'],
              'path':       other_fileref._properties['path'],
              'sourceTree': other_fileref._properties['sourceTree'],
              'remoteRef':  container_item,
            })
        product_group.AppendChild(reference_proxy)

  def SortRemoteProductReferences(self):
    # For each remote project file, sort the associated ProductGroup in the
    # same order that the targets are sorted in the remote project file.
    # This is the sort order used by Xcode.

    def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects.  Go through their associated
      # PBXContainerItem to get the remote PBXFileReference, which will be
      # present in the remote_products list.
      x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
      y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
      x_index = remote_products.index(x_remote)
      y_index = remote_products.index(y_remote)

      # Use the order of each remote PBXFileReference in remote_products to
      # determine the sort order.
      return cmp(x_index, y_index)

    # Python 2: dict.iteritems() and sorted(cmp=...) below.
    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
      # Build up a list of products in the remote project file, ordered the
      # same as the targets that produce them.
      remote_products = []
      for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        remote_products.append(target._properties['productReference'])

      # Sort the PBXReferenceProxy children according to the list of remote
      # products.  remote_products is bound as a default argument so each
      # lambda captures the current loop iteration's list.
      product_group = ref_dict['ProductGroup']
      product_group._properties['children'] = sorted(
          product_group._properties['children'],
          cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
  # The top-level wrapper for a serialized project: holds the file-format
  # version numbers and the root PBXProject object.
  _schema = XCObject._schema.copy()
  _schema.update({
    'archiveVersion': [0, int,        0, 1, 1],
    'classes':        [0, dict,       0, 1, {}],
    'objectVersion':  [0, int,        0, 1, 46],
    'rootObject':     [0, PBXProject, 1, 1],
  })

  def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
    # Although XCProjectFile is implemented here as an XCObject, it's not a
    # proper object in the Xcode sense, and it certainly doesn't have its own
    # ID.  Pass through an attempt to update IDs to the real root object.
    if recursive:
      self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

  def Print(self, file=sys.stdout):
    """Serializes the entire project file to the given stream.

    The default is bound to sys.stdout at definition time, which is the
    intended behavior here.
    """
    self.VerifyHasRequiredProperties()

    # Add the special "objects" property, which will be caught and handled
    # separately during printing.  This structure allows a fairly standard
    # loop do the normal printing.
    self._properties['objects'] = {}
    self._XCPrint(file, 0, '// !$*UTF8*$!\n')
    if self._should_print_single_line:
      self._XCPrint(file, 0, '{ ')
    else:
      self._XCPrint(file, 0, '{\n')
    # Python 2: iteritems() and sorted(cmp=...).  Properties print in sorted
    # key order, with "objects" expanded in place.
    for property, value in sorted(self._properties.iteritems(),
                                  cmp=lambda x, y: cmp(x, y)):
      if property == 'objects':
        self._PrintObjects(file)
      else:
        self._XCKVPrint(file, 1, property, value)
    self._XCPrint(file, 0, '}\n')
    # Remove the temporary property so the in-memory object is unchanged.
    del self._properties['objects']

  def _PrintObjects(self, file):
    # Prints every descendant object, grouped into per-class sections sorted
    # by class name, with objects within a section sorted by ID.
    if self._should_print_single_line:
      self._XCPrint(file, 0, 'objects = {')
    else:
      self._XCPrint(file, 1, 'objects = {\n')

    objects_by_class = {}
    for object in self.Descendants():
      if object == self:
        continue
      class_name = object.__class__.__name__
      if not class_name in objects_by_class:
        objects_by_class[class_name] = []
      objects_by_class[class_name].append(object)

    for class_name in sorted(objects_by_class):
      self._XCPrint(file, 0, '\n')
      self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
      for object in sorted(objects_by_class[class_name],
                           cmp=lambda x, y: cmp(x.id, y.id)):
        object.Print(file)
      self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')

    if self._should_print_single_line:
      self._XCPrint(file, 0, '}; ')
    else:
      self._XCPrint(file, 1, '};\n')
| mit |
akashlevy/Lyff | lyff_lambda/boto/ec2containerservice/layer1.py | 135 | 31044 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.ec2containerservice import exceptions
class EC2ContainerServiceConnection(AWSQueryConnection):
    """
    Amazon EC2 Container Service (Amazon ECS) is a highly scalable,
    fast, container management service that makes it easy to run,
    stop, and manage Docker containers on a cluster of Amazon EC2
    instances. Amazon ECS lets you launch and stop container-enabled
    applications with simple API calls, allows you to get the state of
    your cluster from a centralized service, and gives you access to
    many familiar Amazon EC2 features like security groups, Amazon EBS
    volumes, and IAM roles.
    You can use Amazon ECS to schedule the placement of containers
    across your cluster based on your resource needs, isolation
    policies, and availability requirements. Amazon EC2 Container
    Service eliminates the need for you to operate your own cluster
    management and configuration management systems or worry about
    scaling your management infrastructure.
    """
    # Service API version and the default endpoint used when no
    # region is supplied to the constructor.
    APIVersion = "2014-11-13"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com"
    # Exception raised for service errors with no entry in _faults.
    ResponseError = JSONResponseError
    # Maps service fault names (from the error response body) to the
    # exception classes raised by _make_request.
    _faults = {
        "ServerException": exceptions.ServerException,
        "ClientException": exceptions.ClientException,
    }
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(EC2ContainerServiceConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_cluster(self, cluster_name=None):
"""
Creates a new Amazon ECS cluster. By default, your account
will receive a `default` cluster when you launch your first
container instance. However, you can create your own cluster
with a unique name with the `CreateCluster` action.
During the preview, each account is limited to two clusters.
:type cluster_name: string
:param cluster_name: The name of your cluster. If you do not specify a
name for your cluster, you will create a cluster named `default`.
"""
params = {}
if cluster_name is not None:
params['clusterName'] = cluster_name
return self._make_request(
action='CreateCluster',
verb='POST',
path='/', params=params)
def delete_cluster(self, cluster):
"""
Deletes the specified cluster. You must deregister all
container instances from this cluster before you may delete
it. You can list the container instances in a cluster with
ListContainerInstances and deregister them with
DeregisterContainerInstance.
:type cluster: string
:param cluster: The cluster you want to delete.
"""
params = {'cluster': cluster, }
return self._make_request(
action='DeleteCluster',
verb='POST',
path='/', params=params)
def deregister_container_instance(self, container_instance, cluster=None,
force=None):
"""
Deregisters an Amazon ECS container instance from the
specified cluster. This instance will no longer be available
to run tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instance you want to
deregister. If you do not specify a cluster, the default cluster is
assumed.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance you want to
deregister. The ARN contains the `arn:aws:ecs` namespace, followed
by the region of the container instance, the AWS account ID of the
container instance owner, the `container-instance` namespace, and
then the container instance UUID. For example, arn:aws:ecs: region
: aws_account_id :container-instance/ container_instance_UUID .
:type force: boolean
:param force: Force the deregistration of the container instance. You
can use the `force` parameter if you have several tasks running on
a container instance and you don't want to run `StopTask` for each
task before deregistering the container instance.
"""
params = {'containerInstance': container_instance, }
if cluster is not None:
params['cluster'] = cluster
if force is not None:
params['force'] = str(
force).lower()
return self._make_request(
action='DeregisterContainerInstance',
verb='POST',
path='/', params=params)
def deregister_task_definition(self, task_definition):
"""
Deregisters the specified task definition. You will no longer
be able to run tasks from this definition after
deregistration.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to deregister.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DeregisterTaskDefinition',
verb='POST',
path='/', params=params)
def describe_clusters(self, clusters=None):
"""
Describes one or more of your clusters.
:type clusters: list
:param clusters: A space-separated list of cluster names or full
cluster Amazon Resource Name (ARN) entries. If you do not specify a
cluster, the default cluster is assumed.
"""
params = {}
if clusters is not None:
self.build_list_params(params,
clusters,
'clusters.member')
return self._make_request(
action='DescribeClusters',
verb='POST',
path='/', params=params)
def describe_container_instances(self, container_instances, cluster=None):
"""
Describes Amazon EC2 Container Service container instances.
Returns metadata about registered and remaining resources on
each container instance requested.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to
describe. If you do not specify a cluster, the default cluster is
assumed.
:type container_instances: list
:param container_instances: A space-separated list of container
instance UUIDs or full Amazon Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeContainerInstances',
verb='POST',
path='/', params=params)
def describe_task_definition(self, task_definition):
"""
Describes a task definition.
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to describe.
"""
params = {'taskDefinition': task_definition, }
return self._make_request(
action='DescribeTaskDefinition',
verb='POST',
path='/', params=params)
def describe_tasks(self, tasks, cluster=None):
"""
Describes a specified task or tasks.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to describe. If you do not
specify a cluster, the default cluster is assumed.
:type tasks: list
:param tasks: A space-separated list of task UUIDs or full Amazon
Resource Name (ARN) entries.
"""
params = {}
self.build_list_params(params,
tasks,
'tasks.member')
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='DescribeTasks',
verb='POST',
path='/', params=params)
def discover_poll_endpoint(self, container_instance=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Returns an endpoint for the Amazon EC2 Container Service agent
to poll for updates.
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance. The ARN contains the
`arn:aws:ecs` namespace, followed by the region of the container
instance, the AWS account ID of the container instance owner, the
`container-instance` namespace, and then the container instance
UUID. For example, arn:aws:ecs: region : aws_account_id :container-
instance/ container_instance_UUID .
"""
params = {}
if container_instance is not None:
params['containerInstance'] = container_instance
return self._make_request(
action='DiscoverPollEndpoint',
verb='POST',
path='/', params=params)
def list_clusters(self, next_token=None, max_results=None):
"""
Returns a list of existing clusters.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListClusters` request where `maxResults` was used and
the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of cluster results returned by
`ListClusters` in paginated output. When this parameter is used,
`ListClusters` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListClusters`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then
`ListClusters` returns up to 100 results and a `nextToken` value if
applicable.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListClusters',
verb='POST',
path='/', params=params)
def list_container_instances(self, cluster=None, next_token=None,
max_results=None):
"""
Returns a list of container instances in a specified cluster.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container instances you want to list. If
you do not specify a cluster, the default cluster is assumed..
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListContainerInstances` request where `maxResults` was
used and the results exceeded the value of that parameter.
Pagination continues from the end of the previous results that
returned the `nextToken` value. This value is `null` when there are
no more results to return.
:type max_results: integer
:param max_results: The maximum number of container instance results
returned by `ListContainerInstances` in paginated output. When this
parameter is used, `ListContainerInstances` only returns
`maxResults` results in a single page along with a `nextToken`
response element. The remaining results of the initial request can
be seen by sending another `ListContainerInstances` request with
the returned `nextToken` value. This value can be between 1 and
100. If this parameter is not used, then `ListContainerInstances`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListContainerInstances',
verb='POST',
path='/', params=params)
def list_task_definitions(self, family_prefix=None, next_token=None,
max_results=None):
"""
Returns a list of task definitions that are registered to your
account. You can filter the results by family name with the
`familyPrefix` parameter.
:type family_prefix: string
:param family_prefix: The name of the family that you want to filter
the `ListTaskDefinitions` results with. Specifying a `familyPrefix`
will limit the listed task definitions to definitions that belong
to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTaskDefinitions` request where `maxResults` was used
and the results exceeded the value of that parameter. Pagination
continues from the end of the previous results that returned the
`nextToken` value. This value is `null` when there are no more
results to return.
:type max_results: integer
:param max_results: The maximum number of task definition results
returned by `ListTaskDefinitions` in paginated output. When this
parameter is used, `ListTaskDefinitions` only returns `maxResults`
results in a single page along with a `nextToken` response element.
The remaining results of the initial request can be seen by sending
another `ListTaskDefinitions` request with the returned `nextToken`
value. This value can be between 1 and 100. If this parameter is
not used, then `ListTaskDefinitions` returns up to 100 results and
a `nextToken` value if applicable.
"""
params = {}
if family_prefix is not None:
params['familyPrefix'] = family_prefix
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTaskDefinitions',
verb='POST',
path='/', params=params)
def list_tasks(self, cluster=None, container_instance=None, family=None,
next_token=None, max_results=None):
"""
Returns a list of tasks for a specified cluster. You can
filter the results by family name or by a particular container
instance with the `family` and `containerInstance` parameters.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the tasks you want to list. If you do not
specify a cluster, the default cluster is assumed..
:type container_instance: string
:param container_instance: The container instance UUID or full Amazon
Resource Name (ARN) of the container instance that you want to
filter the `ListTasks` results with. Specifying a
`containerInstance` will limit the results to tasks that belong to
that container instance.
:type family: string
:param family: The name of the family that you want to filter the
`ListTasks` results with. Specifying a `family` will limit the
results to tasks that belong to that family.
:type next_token: string
:param next_token: The `nextToken` value returned from a previous
paginated `ListTasks` request where `maxResults` was used and the
results exceeded the value of that parameter. Pagination continues
from the end of the previous results that returned the `nextToken`
value. This value is `null` when there are no more results to
return.
:type max_results: integer
:param max_results: The maximum number of task results returned by
`ListTasks` in paginated output. When this parameter is used,
`ListTasks` only returns `maxResults` results in a single page
along with a `nextToken` response element. The remaining results of
the initial request can be seen by sending another `ListTasks`
request with the returned `nextToken` value. This value can be
between 1 and 100. If this parameter is not used, then `ListTasks`
returns up to 100 results and a `nextToken` value if applicable.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if container_instance is not None:
params['containerInstance'] = container_instance
if family is not None:
params['family'] = family
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self._make_request(
action='ListTasks',
verb='POST',
path='/', params=params)
def register_container_instance(self, cluster=None,
instance_identity_document=None,
instance_identity_document_signature=None,
total_resources=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Registers an Amazon EC2 instance into the specified cluster.
This instance will become available to place containers on.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to register your container instance with.
If you do not specify a cluster, the default cluster is assumed..
:type instance_identity_document: string
:param instance_identity_document:
:type instance_identity_document_signature: string
:param instance_identity_document_signature:
:type total_resources: list
:param total_resources:
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if instance_identity_document is not None:
params['instanceIdentityDocument'] = instance_identity_document
if instance_identity_document_signature is not None:
params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
if total_resources is not None:
self.build_complex_list_params(
params, total_resources,
'totalResources.member',
('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
return self._make_request(
action='RegisterContainerInstance',
verb='POST',
path='/', params=params)
def register_task_definition(self, family, container_definitions):
"""
Registers a new task definition from the supplied `family` and
`containerDefinitions`.
:type family: string
:param family: You can specify a `family` for a task definition, which
allows you to track multiple versions of the same task definition.
You can think of the `family` as a name for your task definition.
:type container_definitions: list
:param container_definitions: A list of container definitions in JSON
format that describe the different containers that make up your
task.
"""
params = {'family': family, }
self.build_complex_list_params(
params, container_definitions,
'containerDefinitions.member',
('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
return self._make_request(
action='RegisterTaskDefinition',
verb='POST',
path='/', params=params)
def run_task(self, task_definition, cluster=None, overrides=None,
count=None):
"""
Start a task using random placement and the default Amazon ECS
scheduler. If you want to use your own scheduler or place a
task on a specific container instance, use `StartTask`
instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to run your task on. If you do not
specify a cluster, the default cluster is assumed..
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to run.
:type overrides: dict
:param overrides:
:type count: integer
:param count: The number of instances of the specified task that you
would like to place on your cluster.
"""
params = {'taskDefinition': task_definition, }
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
if count is not None:
params['count'] = count
return self._make_request(
action='RunTask',
verb='POST',
path='/', params=params)
def start_task(self, task_definition, container_instances, cluster=None,
overrides=None):
"""
Starts a new task from the specified task definition on the
specified container instance or instances. If you want to use
the default Amazon ECS scheduler to place your task, use
`RunTask` instead.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that you want to start your task on. If you do not
specify a cluster, the default cluster is assumed..
:type task_definition: string
:param task_definition: The `family` and `revision` (
`family:revision`) or full Amazon Resource Name (ARN) of the task
definition that you want to start.
:type overrides: dict
:param overrides:
:type container_instances: list
:param container_instances: The container instance UUIDs or full Amazon
Resource Name (ARN) entries for the container instances on which
you would like to place your task.
"""
params = {'taskDefinition': task_definition, }
self.build_list_params(params,
container_instances,
'containerInstances.member')
if cluster is not None:
params['cluster'] = cluster
if overrides is not None:
params['overrides'] = overrides
return self._make_request(
action='StartTask',
verb='POST',
path='/', params=params)
def stop_task(self, task, cluster=None):
"""
Stops a running task.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task you want to stop. If you do not
specify a cluster, the default cluster is assumed..
:type task: string
:param task: The task UUIDs or full Amazon Resource Name (ARN) entry of
the task you would like to stop.
"""
params = {'task': task, }
if cluster is not None:
params['cluster'] = cluster
return self._make_request(
action='StopTask',
verb='POST',
path='/', params=params)
def submit_container_state_change(self, cluster=None, task=None,
container_name=None, status=None,
exit_code=None, reason=None,
network_bindings=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a container changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the container.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task that hosts the container.
:type container_name: string
:param container_name: The name of the container.
:type status: string
:param status: The status of the state change request.
:type exit_code: integer
:param exit_code: The exit code returned for the state change request.
:type reason: string
:param reason: The reason for the state change request.
:type network_bindings: list
:param network_bindings: The network bindings of the container.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if container_name is not None:
params['containerName'] = container_name
if status is not None:
params['status'] = status
if exit_code is not None:
params['exitCode'] = exit_code
if reason is not None:
params['reason'] = reason
if network_bindings is not None:
self.build_complex_list_params(
params, network_bindings,
'networkBindings.member',
('bindIP', 'containerPort', 'hostPort'))
return self._make_request(
action='SubmitContainerStateChange',
verb='POST',
path='/', params=params)
def submit_task_state_change(self, cluster=None, task=None, status=None,
reason=None):
"""
This action is only used by the Amazon EC2 Container Service
agent, and it is not intended for use outside of the agent.
Sent to acknowledge that a task changed states.
:type cluster: string
:param cluster: The short name or full Amazon Resource Name (ARN) of
the cluster that hosts the task.
:type task: string
:param task: The task UUID or full Amazon Resource Name (ARN) of the
task in the state change request.
:type status: string
:param status: The status of the state change request.
:type reason: string
:param reason: The reason for the state change request.
"""
params = {}
if cluster is not None:
params['cluster'] = cluster
if task is not None:
params['task'] = task
if status is not None:
params['status'] = status
if reason is not None:
params['reason'] = reason
return self._make_request(
action='SubmitTaskStateChange',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
pridemusvaire/youtube-dl | youtube_dl/extractor/keek.py | 119 | 1601 | from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
    IE_NAME = 'keek'
    _TEST = {
        'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
        'md5': '09c5c109067536c1cec8bac8c21fea05',
        'info_dict': {
            'id': 'NODfbab',
            'ext': 'mp4',
            'uploader': 'youtube-dl project',
            'uploader_id': 'ytdl',
            'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The uploader name/handle only appear in the meta description,
        # formatted as: Watch <name> (@<handle>) ...
        uploader = uploader_id = None
        raw_desc = self._html_search_meta('description', webpage)
        if raw_desc:
            uploader = self._html_search_regex(
                r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
            uploader_id = self._html_search_regex(
                r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)

        # Media and thumbnail URLs are derived directly from the video id.
        return {
            'id': video_id,
            'url': 'http://cdn.keek.com/keek/video/%s' % video_id,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'thumbnail': 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id,
            'uploader': uploader,
            'uploader_id': uploader_id,
        }
| unlicense |
ging/horizon | openstack_dashboard/dashboards/endpoints_management/endpoints_management/forms.py | 1 | 5838 | # Copyright (C) 2016 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import uuid
from horizon import forms
from horizon import messages
from horizon import exceptions
from django import shortcuts
from django.template.defaultfilters import register
from django.core.urlresolvers import reverse_lazy
from django.utils.datastructures import SortedDict
from keystoneclient import exceptions as ks_exceptions
from openstack_dashboard.fiware_api import keystone
from openstack_dashboard.dashboards.endpoints_management import utils
LOG = logging.getLogger('idm_logger')
class UpdateEndpointsForm(forms.SelfHandlingForm):
    """Form for editing a service's keystone endpoints per region.

    Builds one CharField per (region, interface) pair for the regions
    the user may administer, then creates/updates the corresponding
    keystone endpoints on submit and provisions a service account for
    the user's region if one does not already exist.
    """
    description = 'Update Service Endpoints'
    template = 'endpoints_management/endpoints_management/_endpoints.html'
    def __init__(self, *args, **kwargs):
        # 'service' and 'endpoints_list' are custom kwargs injected by
        # the view; pop them before the base Form sees them.
        self.service = kwargs.pop('service')
        self.endpoints_list = kwargs.pop('endpoints_list')
        super(UpdateEndpointsForm, self).__init__(*args, **kwargs)
        fields = SortedDict()
        initial = {}
        # One text input per (region, interface); field IDs are
        # '<service>_<region>_<interface>' and are parsed back apart in
        # handle() and filter_region().
        for region in self.request.session['endpoints_allowed_regions']:
            for interface in ['public', 'internal', 'admin']:
                field_ID = '_'.join([self.service.name, region, interface])
                fields[field_ID] = forms.CharField(label=interface.capitalize(),
                                           required=True,
                                           widget=forms.TextInput(
                                               attrs={'class': 'endpoint_input'})
                                           )
                fields.keyOrder.append(field_ID)
        if self.endpoints_list:
            self.service_enabled = True
            # Pre-populate inputs with the URLs of existing endpoints.
            for endpoint in self.endpoints_list:
                field_ID = '_'.join([self.service.name, endpoint.region, endpoint.interface])
                initial[field_ID] = endpoint.url
        else:
            self.service_enabled = False
        self.fields = fields
        self.initial = initial
        self.service_account_name = keystone.get_service_account_name(self.request,
            self.service.name,
            self.request.session['endpoints_user_region'])
    def handle(self, request, data):
        """Create or update one endpoint per submitted field, then make
        sure the region endpoint groups and service account exist."""
        for field_ID, new_url in data.iteritems():
            service_name, region, interface = field_ID.split('_')
            # check if the endpoint already exists
            # NOTE(review): matching uses e.region_id here but __init__
            # reads endpoint.region — presumably both attributes are set
            # on keystone v3 endpoints; verify against the client version.
            endpoint = next((e for e in self.endpoints_list if e.region_id == region and e.interface == interface), None)
            if not endpoint:
                # create new endpoint
                keystone.endpoint_create(request, service=self.service.id, url=new_url, interface=interface, region=region)
            elif new_url != '' and new_url != endpoint.url:
                # update endpoint (only when the URL actually changed)
                keystone.endpoint_update(request, endpoint_id=endpoint.id, endpoint_new_url=new_url)
        self._create_endpoint_group_for_region(request)
        # create service account if it does not exist
        try:
            keystone.user_get(request, self.service_account_name)
        except ks_exceptions.NotFound:
            self._create_service_account(request)
        # display success messages
        messages.success(request, 'Endpoints updated for your region.')
        return shortcuts.redirect('horizon:endpoints_management:endpoints_management:service', self.service.name)
    def _create_endpoint_group_for_region(self, request):
        # Ensure every allowed region has an endpoint group filtering
        # on its region_id; create the missing ones.
        for region in request.session['endpoints_allowed_regions']:
            endpoint_group_for_region = [
                eg for eg in keystone.endpoint_group_list(request)
                if eg.filters.get('region_id', None) == region
            ]
            if not endpoint_group_for_region:
                LOG.debug('Creating endpoint_group for region {0}'.format(region))
                keystone.endpoint_group_create(
                    request=request,
                    name=region + ' Region Group',
                    region_id=region)
    def _create_service_account(self, request):
        # Create a service account with a random password and stash the
        # credentials in the session so the next page can display them.
        region = request.session['endpoints_user_region']
        password = uuid.uuid4().hex
        service_account = keystone.create_service_account(request=request,
                                                          password=password,
                                                          service=self.service.name,
                                                          region=region)
        request.session['new_service_password'] = password
        request.session['new_service_name'] = self.service.name
@register.filter(name='filter_region')
def filter_region(form, region_id):
    """Template filter: return a copy of an UpdateEndpointsForm that
    only carries the fields belonging to the given region.

    Field IDs are '<service>_<region>_<interface>', so the middle
    segment selects the region.
    """
    region_fields = SortedDict()
    for name in form.fields:
        _service, field_region, _interface = name.split('_')
        if field_region == region_id:
            region_fields[name] = form.fields[name]
    narrowed = UpdateEndpointsForm(request=form.request,
                                   service=form.service,
                                   endpoints_list=form.endpoints_list)
    narrowed.fields = region_fields
    return narrowed
| apache-2.0 |
GitHublong/hue | desktop/core/ext-py/Django-1.6.10/tests/test_client/tests.py | 49 | 22706 | # coding: utf-8
"""
39. Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core import mail
from django.test import Client, TestCase, RequestFactory
from django.test.utils import override_settings
from .views import get_view
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class ClientTest(TestCase):
    """End-to-end tests for the Django test Client.
    Each test exercises one Client behaviour (GET/POST handling, redirects,
    form validation, authentication, sessions, exceptions and outgoing mail)
    against views wired up under the ``/test_client/`` URL prefix.
    """
    # Loads the users ('testclient', 'inactive') and related data the
    # login/permission tests rely on.
    fixtures = ['testdata.json']
    def test_get_view(self):
        "GET a view"
        # The data is ignored, but let's check it doesn't crash the system
        # anyway.
        data = {'var': '\xf2'}
        response = self.client.get('/test_client/get_view/', data)
        # Check some response details
        self.assertContains(response, 'This is a test')
        self.assertEqual(response.context['var'], '\xf2')
        self.assertEqual(response.templates[0].name, 'GET Template')
    def test_get_post_view(self):
        "GET a view that normally expects POSTs"
        response = self.client.get('/test_client/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty GET Template')
        self.assertTemplateNotUsed(response, 'Empty POST Template')
    def test_empty_post(self):
        "POST an empty dictionary to a view"
        response = self.client.post('/test_client/post_view/', {})
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, 'Empty POST Template')
        self.assertTemplateNotUsed(response, 'Empty GET Template')
        self.assertTemplateUsed(response, 'Empty POST Template')
    def test_post(self):
        "POST some data to a view"
        post_data = {
            'value': 37
        }
        response = self.client.post('/test_client/post_view/', post_data)
        # Check some response details
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['data'], '37')
        self.assertEqual(response.templates[0].name, 'POST Template')
        self.assertContains(response, 'Data received')
    def test_response_headers(self):
        "Check the value of HTTP headers returned in a response"
        response = self.client.get("/test_client/header_view/")
        self.assertEqual(response['X-DJANGO-TEST'], 'Slartibartfast')
    def test_raw_post(self):
        "POST raw data (with a content type) to a view"
        test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
        response = self.client.post("/test_client/raw_post_view/", test_doc,
                                    content_type="text/xml")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.templates[0].name, "Book template")
        self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
    def test_redirect(self):
        "GET a URL that redirects elsewhere"
        response = self.client.get('/test_client/redirect_view/')
        # Check that the response was a 302 (redirect) and that
        # assertRedirect() understands to put an implicit http://testserver/ in
        # front of non-absolute URLs.
        self.assertRedirects(response, '/test_client/get_view/')
        host = 'django.testserver'
        client_providing_host = Client(HTTP_HOST=host)
        response = client_providing_host.get('/test_client/redirect_view/')
        # Check that the response was a 302 (redirect) with absolute URI
        self.assertRedirects(response, '/test_client/get_view/', host=host)
    def test_redirect_with_query(self):
        "GET a URL that redirects with given GET parameters"
        response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
        # Check if parameters are intact
        self.assertRedirects(response, 'http://testserver/test_client/get_view/?var=value')
    def test_permanent_redirect(self):
        "GET a URL that redirects permanently elsewhere"
        response = self.client.get('/test_client/permanent_redirect_view/')
        # Check that the response was a 301 (permanent redirect)
        self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=301)
        client_providing_host = Client(HTTP_HOST='django.testserver')
        response = client_providing_host.get('/test_client/permanent_redirect_view/')
        # Check that the response was a 301 (permanent redirect) with absolute URI
        self.assertRedirects(response, 'http://django.testserver/test_client/get_view/', status_code=301)
    def test_temporary_redirect(self):
        "GET a URL that does a non-permanent redirect"
        response = self.client.get('/test_client/temporary_redirect_view/')
        # Check that the response was a 302 (non-permanent redirect)
        self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302)
    def test_redirect_to_strange_location(self):
        "GET a URL that redirects to a non-200 page"
        response = self.client.get('/test_client/double_redirect_view/')
        # Check that the response was a 302, and that
        # the attempt to get the redirection location returned 301 when retrieved
        self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', target_status_code=301)
    def test_follow_redirect(self):
        "A URL that redirects can be followed to termination."
        response = self.client.get('/test_client/double_redirect_view/', follow=True)
        self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302, target_status_code=200)
        self.assertEqual(len(response.redirect_chain), 2)
    def test_redirect_http(self):
        "GET a URL that redirects to an http URI"
        response = self.client.get('/test_client/http_redirect_view/',follow=True)
        # test_was_secure_request is set by the target view under test.
        self.assertFalse(response.test_was_secure_request)
    def test_redirect_https(self):
        "GET a URL that redirects to an https URI"
        response = self.client.get('/test_client/https_redirect_view/',follow=True)
        self.assertTrue(response.test_was_secure_request)
    def test_notfound_response(self):
        "GET a URL that responds as '404:Not Found'"
        response = self.client.get('/test_client/bad_view/')
        # Check that the response was a 404, and that the content contains MAGIC
        self.assertContains(response, 'MAGIC', status_code=404)
    def test_valid_form(self):
        "POST valid data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Valid POST Template")
    def test_valid_form_with_hints(self):
        "GET a form, providing hints in the GET data"
        hints = {
            'text': 'Hello World',
            'multi': ('b','c','e')
        }
        response = self.client.get('/test_client/form_view/', data=hints)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Form GET Template")
        # Check that the multi-value data has been rolled out ok
        self.assertContains(response, 'Select a valid choice.', 0)
    def test_incomplete_data_form(self):
        "POST incomplete data to a form"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertContains(response, 'This field is required.', 3)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')
    def test_form_error(self):
        "POST erroneous data to a form"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view/', post_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
    def test_valid_form_with_template(self):
        "POST valid data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'foo@example.com',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data OK')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Valid POST Template")
    def test_incomplete_data_form_with_template(self):
        "POST incomplete data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'value': 37
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, 'form_view.html')
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'This field is required.')
        self.assertFormError(response, 'form', 'single', 'This field is required.')
        self.assertFormError(response, 'form', 'multi', 'This field is required.')
    def test_form_error_with_template(self):
        "POST erroneous data to a form using multiple templates"
        post_data = {
            'text': 'Hello World',
            'email': 'not an email address',
            'value': 37,
            'single': 'b',
            'multi': ('b','c','e')
        }
        response = self.client.post('/test_client/form_view_with_template/', post_data)
        self.assertContains(response, 'POST data has errors')
        self.assertTemplateUsed(response, "form_view.html")
        self.assertTemplateUsed(response, 'base.html')
        self.assertTemplateNotUsed(response, "Invalid POST Template")
        self.assertFormError(response, 'form', 'email', 'Enter a valid email address.')
    def test_unknown_page(self):
        "GET an invalid URL"
        response = self.client.get('/test_client/unknown_view/')
        # Check that the response was a 404
        self.assertEqual(response.status_code, 404)
    def test_url_parameters(self):
        "Make sure that URL ;-parameters are not stripped."
        response = self.client.get('/test_client/unknown_view/;some-parameter')
        # Check that the path in the response includes it (ignore that it's a 404)
        self.assertEqual(response.request['PATH_INFO'], '/test_client/unknown_view/;some-parameter')
    def test_view_with_login(self):
        "Request a page that is protected with @login_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/test_client/login_protected_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/test_client/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_method_login(self):
        "Request a page that is protected with a @login_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/test_client/login_protected_method_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/test_client/login_protected_method_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_login_and_custom_redirect(self):
        "Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/test_client/login_protected_view_custom_redirect/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?redirect_to=/test_client/login_protected_view_custom_redirect/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Request a page that requires a login
        response = self.client.get('/test_client/login_protected_view_custom_redirect/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
    def test_view_with_bad_login(self):
        "Request a page that is protected with @login, but use bad credentials"
        login = self.client.login(username='otheruser', password='nopassword')
        self.assertFalse(login)
    def test_view_with_inactive_login(self):
        "Request a page that is protected with @login, but use an inactive login"
        login = self.client.login(username='inactive', password='password')
        self.assertFalse(login)
    def test_logout(self):
        "Request a logout after logging in"
        # Log in
        self.client.login(username='testclient', password='password')
        # Request a page that requires a login
        response = self.client.get('/test_client/login_protected_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['user'].username, 'testclient')
        # Log out
        self.client.logout()
        # Request a page that requires a login
        response = self.client.get('/test_client/login_protected_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
    @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
    def test_logout_cookie_sessions(self):
        # Re-run the logout test with cookie-backed sessions to cover both
        # session engines.
        self.test_logout()
    def test_view_with_permissions(self):
        "Request a page that is protected with @permission_required"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/test_client/permission_protected_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/test_client/permission_protected_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
        # TODO: Log in with right permissions and request the page again
    def test_view_with_permissions_exception(self):
        "Request a page that is protected with @permission_required but raises a exception"
        # Get the page without logging in. Should result in 403.
        response = self.client.get('/test_client/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 403.
        response = self.client.get('/test_client/permission_protected_view_exception/')
        self.assertEqual(response.status_code, 403)
    def test_view_with_method_permissions(self):
        "Request a page that is protected with a @permission_required method"
        # Get the page without logging in. Should result in 302.
        response = self.client.get('/test_client/permission_protected_method_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
        # Log in
        login = self.client.login(username='testclient', password='password')
        self.assertTrue(login, 'Could not log in')
        # Log in with wrong permissions. Should result in 302.
        response = self.client.get('/test_client/permission_protected_method_view/')
        self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
        # TODO: Log in with right permissions and request the page again
    def test_session_modifying_view(self):
        "Request a page that modifies the session"
        # Session value isn't set initially
        try:
            self.client.session['tobacconist']
            self.fail("Shouldn't have a session value")
        except KeyError:
            pass
        # NOTE(review): this import appears to be unused in this test and
        # could probably be removed.
        from django.contrib.sessions.models import Session
        response = self.client.post('/test_client/session_view/')
        # Check that the session was modified
        self.assertEqual(self.client.session['tobacconist'], 'hovercraft')
    def test_view_with_exception(self):
        "Request a page that is known to throw an error"
        self.assertRaises(KeyError, self.client.get, "/test_client/broken_view/")
        #Try the same assertion, a different way
        try:
            self.client.get('/test_client/broken_view/')
            self.fail('Should raise an error')
        except KeyError:
            pass
    def test_mail_sending(self):
        "Test that mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/test_client/mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Test message')
        self.assertEqual(mail.outbox[0].body, 'This is a test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
    def test_mass_mail_sending(self):
        "Test that mass mail is redirected to a dummy outbox during test setup"
        response = self.client.get('/test_client/mass_mail_sending_view/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].subject, 'First Test message')
        self.assertEqual(mail.outbox[0].body, 'This is the first test email')
        self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
        self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
        self.assertEqual(mail.outbox[1].subject, 'Second Test message')
        self.assertEqual(mail.outbox[1].body, 'This is the second test email')
        self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
        self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
        self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
class CSRFEnabledClientTests(TestCase):
    """Exercise a test Client created with ``enforce_csrf_checks=True``."""
    _CSRF_MIDDLEWARE = 'django.middleware.csrf.CsrfViewMiddleware'

    def setUp(self):
        # Remember the original middleware stack, then make sure the CSRF
        # middleware is part of it for the duration of this test.
        self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
        if self._CSRF_MIDDLEWARE not in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES += (self._CSRF_MIDDLEWARE,)

    def tearDown(self):
        # Restore the middleware stack exactly as it was before setUp().
        settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES

    def test_csrf_enabled_client(self):
        "A client can be instantiated with CSRF checks enabled"
        csrf_client = Client(enforce_csrf_checks=True)
        # The default client posts successfully without a CSRF token ...
        response = self.client.post('/test_client/post_view/', {})
        self.assertEqual(response.status_code, 200)
        # ... while the CSRF-enforcing client is rejected.
        response = csrf_client.post('/test_client/post_view/', {})
        self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
    # Marker attribute used by CustomTestClientTest to verify that the
    # custom client class was actually instantiated via ``client_class``.
    i_am_customized = "Yes"
class CustomTestClientTest(TestCase):
    """Verify a TestCase can swap in a custom Client via ``client_class``."""
    client_class = CustomTestClient

    def test_custom_test_client(self):
        """A test case can specify a custom class for self.client."""
        # assertTrue is the idiomatic form; the original compared the result
        # of hasattr() to True with assertEqual.
        self.assertTrue(hasattr(self.client, "i_am_customized"))
class RequestFactoryTest(TestCase):
    """Tests for the RequestFactory request-building helper."""

    def test_request_factory(self):
        # Build a GET request without going through the URL resolver ...
        request = RequestFactory().get('/somewhere/')
        # ... and invoke the view callable directly with it.
        response = get_view(request)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This is a test')
| apache-2.0 |
mudler/entropy | client/solo/commands/download.py | 5 | 5987 | # -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Command Line Client}.
"""
import os
import sys
import argparse
from entropy.i18n import _
from entropy.output import darkred, darkgreen, blue
from solo.commands.descriptor import SoloCommandDescriptor
from solo.commands._manage import SoloManage
class SoloDownload(SoloManage):
    """
    Main Solo Download command.
    """
    NAME = "download"
    ALIASES = ["fetch"]
    ALLOW_UNPRIVILEGED = False
    INTRODUCTION = """\
Download packages, essentially.
"""
    SEE_ALSO = "equo-source(1)"
    def __init__(self, args):
        """Initialize the command with raw argv-style arguments."""
        SoloManage.__init__(self, args)
        # Maps option strings to sub-completion dicts; populated as a side
        # effect of _get_parser() and consumed by bashcomp().
        self._commands = {}
    def _get_parser(self):
        """
        Overridden from SoloCommand.
        """
        # Build the argparse parser and, in parallel, record every option
        # string in _commands for bash completion.
        _commands = {}
        descriptor = SoloCommandDescriptor.obtain_descriptor(
            SoloDownload.NAME)
        parser = argparse.ArgumentParser(
            description=descriptor.get_description(),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            prog="%s %s" % (sys.argv[0], SoloDownload.NAME))
        parser.set_defaults(func=self._download)
        parser.add_argument(
            "packages", nargs='+',
            metavar="<package>", help=_("package name"))
        # --ask and --pretend are mutually exclusive.
        mg_group = parser.add_mutually_exclusive_group()
        mg_group.add_argument(
            "--ask", "-a", action="store_true",
            default=False,
            help=_("ask before making any changes"))
        _commands["--ask"] = {}
        _commands["-a"] = {}
        mg_group.add_argument(
            "--pretend", "-p", action="store_true",
            default=False,
            help=_("show what would be done"))
        _commands["--pretend"] = {}
        _commands["-p"] = {}
        parser.add_argument(
            "--verbose", "-v", action="store_true",
            default=False,
            help=_("verbose output"))
        _commands["--verbose"] = {}
        _commands["-v"] = {}
        parser.add_argument(
            "--quiet", action="store_true",
            default=False,
            help=_("quiet output"))
        _commands["--quiet"] = {}
        parser.add_argument(
            "--nodeps", action="store_true",
            default=False,
            help=_("exclude package dependencies"))
        _commands["--nodeps"] = {}
        parser.add_argument(
            "--onlydeps", "-o", action="store_true",
            default=False,
            help=_("only include dependencies of selected packages"))
        _commands["--onlydeps"] = {}
        _commands["-o"] = {}
        parser.add_argument(
            "--norecursive", action="store_true",
            default=False,
            help=_("do not calculate dependencies recursively"))
        _commands["--norecursive"] = {}
        parser.add_argument(
            "--deep", action="store_true",
            default=False,
            help=_("include dependencies no longer needed"))
        _commands["--deep"] = {}
        parser.add_argument(
            "--relaxed", action="store_true",
            default=False,
            help=_("calculate dependencies relaxing constraints"))
        _commands["--relaxed"] = {}
        parser.add_argument(
            "--bdeps", action="store_true",
            default=False,
            help=_("include build-time dependencies"))
        _commands["--bdeps"] = {}
        parser.add_argument(
            "--multifetch",
            type=int, default=1,
            choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            help=_("download multiple packages in parallel (max 10)"))
        _commands["--multifetch"] = {}
        self._commands = _commands
        return parser
    def bashcomp(self, last_arg):
        """
        Overridden from SoloCommand.
        """
        self._get_parser() # this will generate self._commands
        return self._hierarchical_bashcomp(last_arg, [], self._commands)
    def _download(self, entropy_client):
        """
        Solo Download command.
        """
        ask = self._nsargs.ask
        pretend = self._nsargs.pretend
        # NOTE(review): verbose and quiet are read from the namespace but not
        # used anywhere in this method.
        verbose = self._nsargs.verbose
        quiet = self._nsargs.quiet
        deep = self._nsargs.deep
        deps = not self._nsargs.nodeps
        recursive = not self._nsargs.norecursive
        relaxed = self._nsargs.relaxed
        onlydeps = self._nsargs.onlydeps
        bdeps = self._nsargs.bdeps
        multifetch = self._nsargs.multifetch
        inst_repo = entropy_client.installed_repository()
        # Hold a shared lock on the installed-packages repository while
        # resolving and downloading.
        with inst_repo.shared():
            packages = self._scan_packages(
                entropy_client, self._nsargs.packages)
            if not packages:
                entropy_client.output(
                    "%s." % (
                        darkred(_("No packages found")),),
                    level="error", importance=1)
                return 1
            run_queue, removal_queue = self._generate_install_queue(
                entropy_client, packages, deps, False, deep, relaxed,
                onlydeps, bdeps, recursive)
            # None queues signal a dependency-resolution failure.
            if (run_queue is None) or (removal_queue is None):
                return 1
            elif not (run_queue or removal_queue):
                entropy_client.output(
                    "%s." % (blue(_("Nothing to do")),),
                    level="warning", header=darkgreen(" @@ "))
                return 0
            # --pretend: stop before any download actually happens.
            if pretend:
                entropy_client.output(
                    "%s." % (blue(_("All done")),))
                return 0
            down_data = {}
            exit_st = self._download_packages(
                entropy_client, run_queue, down_data, multifetch)
            if exit_st == 0:
                self._signal_ugc(entropy_client, down_data)
            return exit_st
# Register this command with the global descriptor registry so the solo
# dispatcher can find it by name ("download") and its aliases.
SoloCommandDescriptor.register(
    SoloCommandDescriptor(
        SoloDownload,
        SoloDownload.NAME,
        _("download packages, essentially"))
    )
| gpl-2.0 |
zack3241/incubator-airflow | tests/contrib/operators/test_dataflow_operator.py | 2 | 7106 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator, \
DataFlowJavaOperator, DataflowTemplateOperator
from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator
from airflow.version import version
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
# Shared fixtures for the operator tests below.
TASK_ID = 'test-dataflow-operator'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
    'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
    'output': 'gs://test/output/my_output'
}
PY_FILE = 'gs://my-bucket/my-object.py'
JAR_FILE = 'example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
# The Python and Java tests share the same default options.
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
    'project': 'test',
    'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
    'project': 'test',
    'stagingLocation': 'gs://test/staging',
    'tempLocation': 'gs://test/temp',
    'zone': 'us-central1-f'
}
ADDITIONAL_OPTIONS = {
    'output': 'gs://test/output',
    'labels': {'foo': 'bar'}
}
# Airflow version string sanitized into a valid GCP label value.
TEST_VERSION = 'v{}'.format(version.replace('.', '-').replace('+', '-'))
# The operator is expected to inject an 'airflow-version' label.
EXPECTED_ADDITIONAL_OPTIONS = {
    'output': 'gs://test/output',
    'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
}
POLL_SLEEP = 30
# Template for mock.patch targets inside the dataflow_operator module.
GCS_HOOK_STRING = 'airflow.contrib.operators.dataflow_operator.{}'
class DataFlowPythonOperatorTest(unittest.TestCase):
    """Unit tests for DataFlowPythonOperator."""
    def setUp(self):
        # Build the operator once per test from the shared module fixtures.
        self.dataflow = DataFlowPythonOperator(
            task_id=TASK_ID,
            py_file=PY_FILE,
            py_options=PY_OPTIONS,
            dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
            options=ADDITIONAL_OPTIONS,
            poll_sleep=POLL_SLEEP)
    def test_init(self):
        """Test DataFlowPythonOperator instance is properly initialized."""
        self.assertEqual(self.dataflow.task_id, TASK_ID)
        self.assertEqual(self.dataflow.py_file, PY_FILE)
        self.assertEqual(self.dataflow.py_options, PY_OPTIONS)
        self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
        self.assertEqual(self.dataflow.dataflow_default_options,
                         DEFAULT_OPTIONS_PYTHON)
        self.assertEqual(self.dataflow.options,
                         EXPECTED_ADDITIONAL_OPTIONS)
    @mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
    @mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
    def test_exec(self, gcs_hook, dataflow_mock):
        """Test DataFlowHook is created and the right args are passed to
        start_python_workflow.
        """
        start_python_hook = dataflow_mock.return_value.start_python_dataflow
        gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
        self.dataflow.execute(None)
        self.assertTrue(dataflow_mock.called)
        expected_options = {
            'project': 'test',
            'staging_location': 'gs://test/staging',
            'output': 'gs://test/output',
            'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION}
        }
        gcs_download_hook.assert_called_once_with(PY_FILE)
        start_python_hook.assert_called_once_with(TASK_ID, expected_options,
                                                  mock.ANY, PY_OPTIONS)
        # The remote py_file should have been staged to a local temp path.
        self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
class DataFlowJavaOperatorTest(unittest.TestCase):
    """Unit tests for DataFlowJavaOperator."""
    def setUp(self):
        self.dataflow = DataFlowJavaOperator(
            task_id=TASK_ID,
            jar=JAR_FILE,
            job_class=JOB_CLASS,
            dataflow_default_options=DEFAULT_OPTIONS_JAVA,
            options=ADDITIONAL_OPTIONS,
            poll_sleep=POLL_SLEEP)
    def test_init(self):
        """Test DataFlowJavaOperator instance is properly initialized."""
        self.assertEqual(self.dataflow.task_id, TASK_ID)
        self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
        self.assertEqual(self.dataflow.dataflow_default_options,
                         DEFAULT_OPTIONS_JAVA)
        self.assertEqual(self.dataflow.job_class, JOB_CLASS)
        self.assertEqual(self.dataflow.jar, JAR_FILE)
        self.assertEqual(self.dataflow.options,
                         EXPECTED_ADDITIONAL_OPTIONS)
    @mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
    @mock.patch(GCS_HOOK_STRING.format('GoogleCloudBucketHelper'))
    def test_exec(self, gcs_hook, dataflow_mock):
        """Test DataFlowHook is created and the right args are passed to
        start_java_workflow.
        """
        start_java_hook = dataflow_mock.return_value.start_java_dataflow
        gcs_download_hook = gcs_hook.return_value.google_cloud_to_local
        self.dataflow.execute(None)
        self.assertTrue(dataflow_mock.called)
        gcs_download_hook.assert_called_once_with(JAR_FILE)
        start_java_hook.assert_called_once_with(TASK_ID, mock.ANY,
                                                mock.ANY, JOB_CLASS)
class DataFlowTemplateOperatorTest(unittest.TestCase):
    """Unit tests for DataflowTemplateOperator."""
    def setUp(self):
        self.dataflow = DataflowTemplateOperator(
            task_id=TASK_ID,
            template=TEMPLATE,
            parameters=PARAMETERS,
            dataflow_default_options=DEFAULT_OPTIONS_TEMPLATE,
            poll_sleep=POLL_SLEEP)
    def test_init(self):
        """Test DataflowTemplateOperator instance is properly initialized."""
        self.assertEqual(self.dataflow.task_id, TASK_ID)
        self.assertEqual(self.dataflow.template, TEMPLATE)
        self.assertEqual(self.dataflow.parameters, PARAMETERS)
        self.assertEqual(self.dataflow.poll_sleep, POLL_SLEEP)
        self.assertEqual(self.dataflow.dataflow_default_options,
                         DEFAULT_OPTIONS_TEMPLATE)
    @mock.patch('airflow.contrib.operators.dataflow_operator.DataFlowHook')
    def test_exec(self, dataflow_mock):
        """Test DataFlowHook is created and the right args are passed to
        start_template_workflow.
        """
        start_template_hook = dataflow_mock.return_value.start_template_dataflow
        self.dataflow.execute(None)
        self.assertTrue(dataflow_mock.called)
        expected_options = {
            'project': 'test',
            'stagingLocation': 'gs://test/staging',
            'tempLocation': 'gs://test/temp',
            'zone': 'us-central1-f'
        }
        start_template_hook.assert_called_once_with(TASK_ID, expected_options,
                                                    PARAMETERS, TEMPLATE)
| apache-2.0 |
tseaver/google-cloud-python | firestore/google/cloud/firestore_v1beta1/__init__.py | 4 | 2467 | # Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python idiomatic client for Google Cloud Firestore."""
from pkg_resources import get_distribution
__version__ = get_distribution("google-cloud-firestore").version
from google.cloud.firestore_v1beta1 import types
from google.cloud.firestore_v1beta1._helpers import GeoPoint
from google.cloud.firestore_v1beta1._helpers import ExistsOption
from google.cloud.firestore_v1beta1._helpers import LastUpdateOption
from google.cloud.firestore_v1beta1._helpers import ReadAfterWriteError
from google.cloud.firestore_v1beta1._helpers import WriteOption
from google.cloud.firestore_v1beta1.batch import WriteBatch
from google.cloud.firestore_v1beta1.client import Client
from google.cloud.firestore_v1beta1.collection import CollectionReference
from google.cloud.firestore_v1beta1.transforms import ArrayRemove
from google.cloud.firestore_v1beta1.transforms import ArrayUnion
from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD
from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP
from google.cloud.firestore_v1beta1.document import DocumentReference
from google.cloud.firestore_v1beta1.document import DocumentSnapshot
from google.cloud.firestore_v1beta1.gapic import enums
from google.cloud.firestore_v1beta1.query import Query
from google.cloud.firestore_v1beta1.transaction import Transaction
from google.cloud.firestore_v1beta1.transaction import transactional
from google.cloud.firestore_v1beta1.watch import Watch
# Explicit public API of the ``google.cloud.firestore_v1beta1`` package.
__all__ = [
    "__version__",
    "ArrayRemove",
    "ArrayUnion",
    "Client",
    "CollectionReference",
    "DELETE_FIELD",
    "DocumentReference",
    "DocumentSnapshot",
    "enums",
    "ExistsOption",
    "GeoPoint",
    "LastUpdateOption",
    "Query",
    "ReadAfterWriteError",
    "SERVER_TIMESTAMP",
    "Transaction",
    "transactional",
    "types",
    "Watch",
    "WriteBatch",
    "WriteOption",
]
| apache-2.0 |
HM2MC/Webfront | m2m/stats/views.py | 2 | 12637 | from django.shortcuts import render_to_response
from django.db.models import Sum,Min,Max
from datetime import datetime
import time as Time
from stats.models import Status, Status2, Log
from search.models import File, Host
from browseNet.models import Share,Path
# Create your views here.
def getOnlineSize():
    '''custom SQL for getting the current online size of the network

    Returns the 1-tuple produced by fetchone(), e.g. ``(123456,)``; the
    summed value is None when no host is currently online.
    '''
    from django.db import connection
    cursor = connection.cursor()
    try:
        # Sum the shared file sizes of every host flagged online
        # (host.Flags bit 16 == "online").
        cursor.execute("SELECT Sum(share.TotalFileSize) as Size FROM host LEFT JOIN share USING(hid) WHERE host.Flags & 16=16")
        response = cursor.fetchone()
    finally:
        # The original code leaked the cursor and skipped all cleanup if the
        # query raised; always release both handles.
        cursor.close()
        connection.close()
    return response
def sizeToReadable(value):
    '''takes a number of bytes and reformats it to a nice "N.NN XiB" string.

    Returns "??" for anything that cannot be coerced to float. Values are
    repeatedly divided by 1024 and labelled with the matching binary suffix.
    The original implementation only knew suffixes up to TiB and mislabelled
    anything larger (or smaller than 1 KiB after 4 divisions) as plain "B";
    the suffix table below extends cleanly through EiB.
    '''
    try:
        value = float(value)
    except Exception:  # we expect a number, or something coercible to one.
        return "??"
    suffixes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
    count = 0
    # Stop dividing once the value fits, or once we run out of suffixes.
    while value > 1024 and count < len(suffixes) - 1:
        value = value / 1024
        count += 1
    # 2 decimal places for table formatting reasons.
    return "%.2f %s" % (value, suffixes[count])
def display(request):
    '''Stats page view: builds Google Chart API query URLs plus headline
    numbers from the Status/Status2 history tables and renders the template.'''
    # these make the numbers small enough for google analytics urls
    BYTEDIVISOR = 1099511627776 #tsk tsk! don't use magic numbers, bryce.
    TIMEDIVISOR = 86400
    minSize = Status.objects.aggregate(Min('filesize'))['filesize__min']
    minSizeB = minSize/BYTEDIVISOR
    maxSize = Status.objects.aggregate(Max('filesize'))['filesize__max']
    maxSizeB = maxSize/BYTEDIVISOR
    minFiles = Status.objects.aggregate(Min('files'))['files__min']
    maxFiles = Status.objects.aggregate(Max('files'))['files__max']
    minDirs = Status.objects.aggregate(Min('directories'))['directories__min']
    maxDirs = Status.objects.aggregate(Max('directories'))['directories__max']
    ID = Status.objects.order_by('-id')[0].id
    #creates a unix timestamp
    maxDate = Time.mktime(datetime.now().timetuple())
    maxDateB = maxDate/TIMEDIVISOR
    fileSizes = ''
    numFiles = ''
    dates = ''
    dateMin = 0
    #begin populating the querystrings for the charts
    # Walk backwards through Status rows (every 4th id, up to 62 samples),
    # prepending so the chart data ends up in chronological order.
    for counter in range(0,62):
        if ID < 1:
            break
        if counter == 0: #so we don't add a comma to the start of our strings
            delim = ""
        else:
            delim = ","
        try:
            row = Status.objects.get(pk=ID)
        except:
            break
        fileSizes = "%(size).2f%(delim)s%(sizes)s" % {'size':row.filesize/BYTEDIVISOR,'delim':delim,'sizes':fileSizes,}
        numFiles = "%(num)d%(delim)s%(nums)s" % {'num':row.files,'delim':delim,'nums':numFiles}
        dateMin = Time.mktime(datetime.strptime(row.lastchange,"%m/%d/%Y %H:%M").timetuple())/TIMEDIVISOR
        dates = "%(date).2f%(delim)s%(dates)s" % {'date':dateMin-maxDateB,'delim':delim,'dates':dates}
        ID -= 4
    # Second pass: Status2 rows for online-host counts and query rates.
    ID = Status2.objects.aggregate(Max('id'))['id__max']-6
    minQ = 1000
    maxQ = 0
    minHosts = 1000
    maxHosts = 0
    queries = ''
    online = ''
    datesB = ''
    dateMinB = 0
    BADTIME = 1216103513 #whyyy bryce whyy
    for counter in range(0,98):
        if ID < 1:
            break
        if counter == 0:
            delim = ''
        else:
            delim = ','
        try:
            row0 = Status2.objects.get(pk=ID)
            row1 = Status2.objects.get(pk=ID-6)
            row2 = Status2.objects.get(pk=ID+6)
        except:
            break
        q = (row2.queries - row1.queries)/2.167 # what is this number!?
        queries = "%(q)d%(delim)s%(queries)s" % {'q':q,'delim':delim,'queries':queries}
        hosts = row0.onlinehosts
        if q > maxQ:
            maxQ = q
        if q < minQ:
            minQ = q
        if hosts > maxHosts:
            maxHosts = hosts
        if hosts < minHosts:
            minHosts = hosts
        online = "%(hosts)d%(delim)s%(online)s" % {'hosts':hosts,'delim':delim,'online':online}
        time = Time.mktime(row0.time.timetuple())
        # BADTIME rows have a known-bogus timestamp; back-fill assuming one
        # sample every 600 seconds -- presumably a data-import artifact.
        if time == BADTIME:
            time -= 600 * (4056-ID)
        dateMinB = time/TIMEDIVISOR
        datesB = "%(date).2f%(delim)s%(dates)s" % {'date':dateMinB-maxDateB,'delim':delim,'dates':datesB}
        ID -= 11
    minHosts = 0 # reset this for scale purposes
    # Convert the chart minimums to day offsets relative to "now".
    dateMin = dateMin - maxDateB
    dateMinB = dateMinB - maxDateB
    maxFile = 0
    maxHostSize = 0
    maxN = 10000000000000
    minN = 100
    count = minN
    fileSizesB = ''
    hostSizesB = ''
    labels = ''
    # this takes a FUCKING LONG TIME.
    # ask yourself if its worth it before you uncomment this.
    '''while count < maxN:
        count *= 100
        mini = count/100
        if count == minN:
            delim = ""
            mini = 0
        else:
            delim = ","
        row = len(File.objects.filter(filesize__range=(mini+1,count)))
        if row > maxFile:
            maxFile = row
        fileSizesB = "%(size)d%(delim)s%(sizes)s" % {'size':row,'delim':delim,'sizes':fileSizesB}
        row = len(Host.objects.filter(totalfilesize__range=(mini+1,count)))
        if row > maxHostSize:
            maxHostSize = row
        hostSizesB = "%(size)d%(delim)s%(sizes)s" % {'size':row,'delim':delim,'sizes':hostSizesB}
        labels = "%(labels)s%(delim)s%(size)s" % {'labels':labels,'delim':"|< ",'size':sizeToReadable(count)}
    maxFile *= 2
    '''
    #----
    # Make the graph queries themselves
    #----
    # NOTE: the continuation lines are flush-left on purpose -- any leading
    # whitespace would become part of the URL string.
    querySrc = "http://chart.apis.google.com/chart?\
cht=lxy&\
chd=t:%(datesB)s|%(online)s|%(datesB)s|%(queries)s&\
chds=%(dateMinB)f,0,0,%(maxQ)d,%(dateMinB)f,0,0,%(maxQ)d&\
chdl=Online Hosts|Queries/Hr&\
chco=ABD1E6,000000&\
chxt=x,y,x,r&\
chxl=2:||Days|&\
chxr=0,%(dateMinB)f,0|1,0,%(maxQ)d|3,0,%(maxQ)d&\
chs=380x200" % {
        'datesB':datesB,
        'online':online,
        'queries':queries,
        'dateMinB':dateMinB,
        'minHosts':minHosts,
        'maxHosts':maxHosts,
        'maxQ':maxQ,
        'minQ':minQ,
        }
    sizeSrc = "http://chart.apis.google.com/chart?\
cht=lxy&\
chd=t:%(dates)s|%(numFiles)s|%(dates)s|%(fileSizes)s&\
chds=%(dateMin)f,0,%(minFiles)d,%(maxFiles)d,%(dateMin)f,0,%(minSizeB)d,%(maxSizeB)d&\
chdl=Files|Total Size (TiB)&\
chco=ABD1E6,000000&\
chxt=x,y,x,r&\
chxl=2:||Days|&\
chxr=0,%(dateMin)f,0|1,%(minSizeB)d,%(maxSizeB)d|3,%(minFiles)d,%(maxFiles)d&\
chdlp=r&\
chs=440x200" % {
        'dates':dates,
        'numFiles':numFiles,
        'fileSizes':fileSizes,
        'minFiles':minFiles,
        'maxFiles':maxFiles,
        'dateMin':dateMin,
        'minSizeB':minSizeB,
        'maxSizeB':maxSizeB,
        }
    # if len(sizeSrc) > 2048:
    # sizeSrc = "http://chart.apis.google.com/chart?chs=440x200&chf=bg,s,ffffff&cht=t&chtm=world&chco=ecf3fe,cccccc,0000ff&chld=US&chd=t:100"
    barsizeSrc = "http://chart.apis.google.com/chart?cht=bvs&\
chco=000000,ABD1E6&\
chs=820x50&chbh=a&\
chxt=x,y,r&\
chdl=Files|Hosts&chxl=0:%(labels)s|&\
chxr=1,0,%(maxFile)d|2,0,%(maxHostSize)d&\
chds=0,%(maxFile)d,0,%(maxHostSize)d&\
chd=t:%(fileSizesB)s|%(hostSizesB)s" %{
        'labels':labels,
        'maxFile':maxFile,
        'maxHostSize':maxHostSize,
        'fileSizesB':fileSizesB,
        'hostSizesB':hostSizesB,
        }
    #----
    # And now for the cold, hard numbers
    #----
    infoHolder = Status.objects.all().order_by('-id')[0]
    lastCrawl = infoHolder.lastchange
    indexedServers = infoHolder.smbhosts
    dirCount = infoHolder.directories
    fileCount = infoHolder.files
    netSize = sizeToReadable(infoHolder.filesize)
    queryCount = infoHolder.queries
    infoHolder = Status2.objects.all().order_by('-id')[0]
    onlineServers = infoHolder.onlinehosts
    onlineSize = sizeToReadable(getOnlineSize()[0])
    #----
    # Pulling info from the Log table
    #----
    '''
    # Last activity:
    latestActivity = Log.objects.raw("SELECT * FROM log ORDER BY LID DESC LIMIT 5")
    latestActions = []
    for row in latestActivity:
        action = row.searchstring.split()
        actions = [
            'browse',
            'Browse:',
            'Search:',
            ]
        if action[0] in actions:
            # BROWSE
            if action[0] == actions[0] or action[0] == actions[1]:
                #now, parse the browse.
                #action[1] should be in the form "HID=?,PID=?,SID=?"
                if action[0] == actions[0]:
                    pieces = action[1].split(',')
                    taped = []
                    for piece in pieces:
                        taped += [piece.split('=')]
                    if taped[0][1] != '0': # HOST
                        host = Host.objects.get(pk=int(taped[0][1]))
                    elif taped[1][1] != '0': # SHARE
                        host = Share.objects.get(pk=int(taped[1][1])).hostid
                    elif taped[2][1] != '0': # PATH
                        host = Path.objects.get(pk=int(taped[2][1])).hid
                    else:
                        host = "??"
                #action[1:] should be in the form ["H/S/P,","####"]
                elif action[0] == actions[1]:
                    if action[1] == "H,":
                        host = Host.objects.get(pk=int(action[2]))
                    elif action[1] == "S,":
                        host = Share.objects.get(pk=int(action[2]))
                    elif action[1] == "P,":
                        host = Path.objects.get(pk=int(action[2]))
                    else:
                        host = "??"
                else:
                    host = "??"
                lastAction = "%(ip)s browsed %(host)s" % {'ip':row.client,'host':host}
            # a search from new M2M
            elif action[0] == actions[2]:
                lastAction = "%(ip)s searched for %(search)s" % {'ip':row.client,'search':''.join(action[1:])}
            # a search from old M2M
            else:
                lastAction ="%(ip)s searched for %(search)s" % {'ip':row.client,'search':" ".join(action)}
        latestActions += [lastAction]
    '''
    # Duplicate of the template context, handy for debugging in the template.
    debug = {
        'querySrc':querySrc,
        'sizeSrc':sizeSrc,
        'barsizeSrc':barsizeSrc,
        'lastCrawl':lastCrawl,
        'indexedServers':indexedServers,
        'onlineServers':onlineServers,
        'dirCount':dirCount,
        'fileCount':fileCount,
        'netSize':netSize,
        'onlineSize':onlineSize,
        'queryCount':queryCount,
        }
    return render_to_response("stats/display.html",
        {
        'stats':'current',
        'siteStat':'current',
        'querySrc':querySrc,
        'sizeSrc':sizeSrc,
        'barsizeSrc':barsizeSrc,
        'lastCrawl':lastCrawl,
        'indexedServers':indexedServers,
        'onlineServers':onlineServers,
        'dirCount':dirCount,
        'fileCount':fileCount,
        'netSize':netSize,
        'onlineSize':onlineSize,
        'queryCount':queryCount,
        #'lastActions':latestActions,
        'debug':debug,
},) | mit |
pklimai/py-junos-eznc | lib/jnpr/junos/utils/ftp.py | 1 | 3689 | """
FTP utility
"""
import re
import ftplib
import os
import logging
logger = logging.getLogger("jnpr.junos.utils.ftp")
class FTP(ftplib.FTP):
    """
    FTP utility that can be used to transfer files to and from a device.

    Supports the python *context-manager* pattern.  For example::

        from jnpr.junos.utils.ftp import FTP
        with FTP(dev) as ftp:
            ftp.put(package, remote_path)
    """

    def __init__(self, junos, **ftpargs):
        """
        :param Device junos: Device object
        :param kwargs ftpargs: any additional args to be passed to ftplib FTP
        """
        self._junos = junos
        self._ftpargs = ftpargs
        # ftplib.FTP.__init__ connects and logs in immediately when a host
        # is given, so there is no separate connect step.
        ftplib.FTP.__init__(self, self._junos._hostname,
                            self._junos._auth_user,
                            self._junos._auth_password)

    # dummy function, as connection is created by ftplib in __init__ only
    def open(self):
        return self

    def put(self, local_file, remote_path=None):
        """
        This function is used to upload file to the router from local
        execution server/shell.

        :param local_file: Full path along with filename which has to be
            copied to router
        :param remote_path: path in which to receive the files on the remote
            host. If ignored FILE will be copied to "tmp"
        :returns: True if the transfer succeeds, else False
        """
        try:
            # Raw strings (r'...') fix the invalid '\.'/'\w' escape
            # sequences of the original; the regexes are unchanged.
            mat = re.search(r'^.*/(.*)$', local_file)
            if mat:
                if not remote_path:
                    remote_file = '/tmp/' + mat.group(1)
                else:
                    # A remote path containing a slash and ending in
                    # ".<ext>" is taken as the full destination file name.
                    if re.search(r'^.*/(.*)$', remote_path) and \
                            re.search(r'\.\w+$', remote_path):
                        remote_file = remote_path
                    # Looks like remote path is given as file location
                    else:
                        remote_file = os.path.join(remote_path, mat.group(1))
            else:
                if not remote_path:
                    remote_file = os.path.join('/tmp/', local_file)
                else:
                    remote_file = os.path.join(remote_path, local_file)
            # Context manager closes the local file handle on every path;
            # the original leaked it on both success and failure.
            with open(local_file, 'rb') as fp:
                self.storbinary('STOR ' + remote_file, fp)
        except Exception as ex:
            logger.error(ex)
            return False
        return True

    def get(self, remote_file, local_path=None):
        """
        This function is used to download file from router to local execution
        server/shell.

        :param local_path: path in which to receive files locally.  Defaults
            to the current working directory *at call time* (the original
            default evaluated os.getcwd() once, at import time).
        :param remote_file: Full path along with filename on the router. If
            ignored FILE will be copied to "tmp"
        :returns: True if the transfer succeeds, else False
        """
        if local_path is None:
            local_path = os.getcwd()
        if os.path.isdir(local_path):
            mat = re.search(r'^.*/(.*)$', remote_file)
            if mat:
                local_file = os.path.join(local_path, mat.group(1))
            else:
                local_file = local_path
        else:
            local_file = local_path
        try:
            # Context manager guarantees the local file is closed even when
            # the transfer fails partway through.
            with open(local_file, 'wb') as fp:
                self.retrbinary('RETR ' + remote_file, fp.write)
        except Exception as ex:
            logger.error(ex)
            return False
        return True

    # -------------------------------------------------------------------------
    # CONTEXT MANAGER
    # -------------------------------------------------------------------------

    def __enter__(self):
        # return self.open(**self._ftpargs)
        return self

    def __exit__(self, exc_ty, exc_val, exc_tb):
        return self.close()
| apache-2.0 |
n0n0x/fabtools-python | fabtools/tests/test_vagrant_status.py | 15 | 5075 | import textwrap
import unittest
from mock import patch
class TestParseVagrantMachineReadableStatus(unittest.TestCase):
    """Parsing of `vagrant status --machine-readable` CSV-style output."""

    def test_machine_readable_status_running(self):
        with patch('fabtools.vagrant.local') as mock_local:
            # Raw string: the fixture contains literal \n sequences and
            # Vagrant's %!(VAGRANT_COMMA) escapes, exactly as emitted.
            mock_local.return_value = textwrap.dedent(r"""
                1391354677,default,provider-name,vmware_fusion
                1391354677,default,state,running
                1391354677,default,state-human-short,running
                1391354677,default,state-human-long,The VM is running. To stop this VM%!(VAGRANT_COMMA) you can run `vagrant halt` to\nshut it down%!(VAGRANT_COMMA) or you can run `vagrant suspend` to simply suspend\nthe virtual machine. In either case%!(VAGRANT_COMMA) to restart it again%!(VAGRANT_COMMA) run\n`vagrant up`.
                """)
            # Import deferred so the patch is active when the module is used.
            from fabtools.vagrant import _status_machine_readable
            res = _status_machine_readable()
            self.assertEqual(res, [('default', 'running')])

    def test_machine_readable_status_not_running(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = textwrap.dedent(r"""
                1391366299,default,provider-name,vmware_fusion
                1391366299,default,state,not_running
                1391366299,default,state-human-short,not running
                1391366299,default,state-human-long,The VM is powered off. To restart the VM%!(VAGRANT_COMMA) run `vagrant up`
                """)
            from fabtools.vagrant import _status_machine_readable
            res = _status_machine_readable()
            self.assertEqual(res, [('default', 'not running')])
class TestParseVagrantStatusWithProvider(unittest.TestCase):
    """Parsing of human-readable `vagrant status` output that includes a
    provider name in parentheses after the machine state."""

    def test_parse_status_running(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = textwrap.dedent("""\
                Current machine states:
                default running (vmware_fusion)
                The VM is running. To stop this VM, you can run `vagrant halt` to
                shut it down, or you can run `vagrant suspend` to simply suspend
                the virtual machine. In either case, to restart it again, run
                `vagrant up`.
                """)
            from fabtools.vagrant import _status_human_readable
            res = _status_human_readable()
            self.assertEqual(res, [('default', 'running')])

    def test_parse_status_not_created(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = textwrap.dedent("""\
                Current machine states:
                default not created (vmware_fusion)
                The VMware machine has not yet been created. Run `vagrant up`
                to create the machine. If a machine is not created, only the
                default provider will be shown. Therefore, if a provider is not listed,
                then the machine is not created for that provider.
                """)
            from fabtools.vagrant import _status_human_readable
            res = _status_human_readable()
            self.assertEqual(res, [('default', 'not created')])
class TestParseVagrantStatusWithoutProvider(unittest.TestCase):
    """Parsing of human-readable `vagrant status` output with no provider
    suffix (older Vagrant versions)."""

    def test_parse_status_running(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = textwrap.dedent("""\
                Current machine states:
                default running
                The VM is running. To stop this VM, you can run `vagrant halt` to
                shut it down, or you can run `vagrant suspend` to simply suspend
                the virtual machine. In either case, to restart it again, run
                `vagrant up`.
                """)
            from fabtools.vagrant import _status_human_readable
            res = _status_human_readable()
            self.assertEqual(res, [('default', 'running')])

    def test_parse_status_not_created(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = textwrap.dedent("""\
                Current machine states:
                default not created
                The VMware machine has not yet been created. Run `vagrant up`
                to create the machine. If a machine is not created, only the
                default provider will be shown. Therefore, if a provider is not listed,
                then the machine is not created for that provider.
                """)
            from fabtools.vagrant import _status_human_readable
            res = _status_human_readable()
            self.assertEqual(res, [('default', 'not created')])
class TestVagrantStatus(unittest.TestCase):
    """The high-level status() helper reports the first machine's state."""

    def test_status(self):
        expected_state = 'running'
        with patch('fabtools.vagrant._status') as fake_status:
            fake_status.return_value = [('default', expected_state)]
            # Import deferred so the patch is in place before use.
            from fabtools.vagrant import status
            self.assertEqual(status(), expected_state)
| bsd-2-clause |
sirkubax/ansible | lib/ansible/module_utils/database.py | 401 | 5839 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class SQLParseError(Exception):
    """Raised when an SQL identifier cannot be safely parsed or quoted."""
    pass
class UnclosedQuoteError(SQLParseError):
    """Raised when a quoted identifier has no matching closing quote."""
    pass
# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1)
# MySQL has no schema level, so a column needs at most 3 dotted components.
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
def _find_end_quote(identifier, quote_char):
accumulate = 0
while True:
try:
quote = identifier.index(quote_char)
except ValueError:
raise UnclosedQuoteError
accumulate = accumulate + quote
try:
next_char = identifier[quote+1]
except IndexError:
return accumulate
if next_char == quote_char:
try:
identifier = identifier[quote+2:]
accumulate = accumulate + 2
except IndexError:
raise UnclosedQuoteError
else:
return accumulate
def _identifier_parse(identifier, quote_char):
    """Split a possibly-dotted, possibly-quoted SQL identifier into a list of
    fully quoted component strings.

    Already-quoted components are kept verbatim; unquoted components have any
    embedded quote characters doubled and are then wrapped in *quote_char*.
    Raises SQLParseError on malformed input.
    """
    if not identifier:
        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
    already_quoted = False
    if identifier.startswith(quote_char):
        # Candidate user-quoted component: locate its closing quote.
        already_quoted = True
        try:
            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            # No closing quote: fall through and treat the text as unquoted.
            already_quoted = False
        else:
            if end_quote < len(identifier) - 1:
                if identifier[end_quote+1] == '.':
                    # Quoted component followed by further dotted components.
                    dot = end_quote + 1
                    first_identifier = identifier[:dot]
                    next_identifier = identifier[dot+1:]
                    further_identifiers = _identifier_parse(next_identifier, quote_char)
                    further_identifiers.insert(0, first_identifier)
                else:
                    # Text after the closing quote that is not a dot means
                    # the user failed to escape an embedded quote.
                    raise SQLParseError('User escaped identifiers must escape extra quotes')
            else:
                further_identifiers = [identifier]
    if not already_quoted:
        try:
            dot = identifier.index('.')
        except ValueError:
            # No dot at all: quote the whole identifier.
            identifier = identifier.replace(quote_char, quote_char*2)
            identifier = ''.join((quote_char, identifier, quote_char))
            further_identifiers = [identifier]
        else:
            if dot == 0 or dot >= len(identifier) - 1:
                # A leading or trailing dot belongs to the name itself.
                identifier = identifier.replace(quote_char, quote_char*2)
                identifier = ''.join((quote_char, identifier, quote_char))
                further_identifiers = [identifier]
            else:
                # Split on the first dot and recurse on the remainder.
                first_identifier = identifier[:dot]
                next_identifier = identifier[dot+1:]
                further_identifiers = _identifier_parse(next_identifier, quote_char)
                first_identifier = first_identifier.replace(quote_char, quote_char*2)
                first_identifier = ''.join((quote_char, first_identifier, quote_char))
                further_identifiers.insert(0, first_identifier)
    return further_identifiers
def pg_quote_identifier(identifier, id_type):
    """Quote a dotted PostgreSQL identifier of the given *id_type*.

    Raises SQLParseError when the identifier has more dotted components
    than PostgreSQL allows for that identifier type.
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    max_dots = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_dots:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, max_dots))
    return '.'.join(fragments)
def mysql_quote_identifier(identifier, id_type):
    """Quote a dotted MySQL identifier of the given *id_type*.

    A bare ``*`` component (e.g. in GRANT statements) is emitted unquoted.
    Raises SQLParseError when the identifier has more dotted components
    than MySQL allows for that identifier type.
    """
    fragments = _identifier_parse(identifier, quote_char='`')
    max_dots = _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_dots:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, max_dots))
    # The wildcard component must stay a literal '*', not a quoted name.
    special_cased_fragments = ['*' if fragment == '`*`' else fragment
                               for fragment in fragments]
    return '.'.join(special_cased_fragments)
| gpl-3.0 |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/account_payment/__init__.py | 436 | 1279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import account_payment
import wizard
import account_move_line
import account_invoice
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Bobypalcka/coinwatcher | lib/cli_menu.py | 1 | 6736 | import os
import sys
import time
from colorama import Back, Fore, Style, init
from .api_pull_data import api_data_fill as update
from .coin_price_rank import coins
from .details import owerview
from .newUser import create_new_user as newUser
from .user_balance import balance
from .wallets import wallet_key
class mainmenu(object):
    """Interactive CLI menu loop for a logged-in user.

    Construction refreshes API data, prints the banner, then loops on
    main_meni() until the user chooses to log out.
    """

    def __init__(self, idu):
        # :param idu: id of the authenticated user; stored as self.userid.
        init()
        os.system('cls' if os.name == 'nt' else 'clear')
        self.userid = idu
        update(self.userid)
        self.uvod()
        self.main_meni_exit = False
        # while/else: the else branch runs once the loop condition fails,
        # i.e. after the user picks "Logout".
        while self.main_meni_exit == False:
            self.main_meni()
        else:
            self.save_and_exit()

    def main_meni(self, level=0):
        """Render one menu level, read a choice, and dispatch it.

        :param level: 0 = main menu; 1-4 = submenus; 99 = hidden admin menu.
        """
        menu = {}
        menu[0] = {1: " Coins & Balance",
                   2: " Wallets \n", 3: " Status\n", 4: " Settings\n", 9: " Logout", 90: " Exit"}
        menu[1] = {1: " Check price of a coin", 2: " List top ranked coins (top 30)\n",
                   3: " Balance print", 4: " Add coin to balance",
                   5: " Edit coin balance", 6: " Remove coin from balance\n", 9: " Go back"}
        menu[2] = {1: " List all wallets", 2: " Add wallet public key",
                   3: " Remove wallet\n", 4: " Check new balance for all wallets", 5: " Rename wallet\n", 9: " Go back"}
        menu[3] = {1: " Quick preview", 2: " Detail view\n",
                   3: " Global stats\n", 9: " Go back"}
        menu[4] = {1: " Create new user", 2: " Set new password\n",
                   3: " Api request settings\n", 9: " Go back"}
        menu[99] = {1: " Test-update_database", 2: " Print all lists",
                    3: " Test files", 4: " Auto-update toggle", 9: " Go back"}
        print(Fore.GREEN + " Level : " +
              menu[0][level] if level > 0 and level < 99 else "Main menu:")
        print(Fore.MAGENTA + "-------------\n" + Fore.CYAN)
        for option in menu[level]:
            print(str(option) + menu[level][option])
        print(Fore.MAGENTA + "-------------\n" + Fore.CYAN)
        # user input
        usr_in = input("Option number: ")
        os.system('cls' if os.name == 'nt' else 'clear')
        # '9' at the top level flags the outer loop to stop (logout);
        # '90' exits the whole program.
        if level == 0 and usr_in == '9':
            self.main_meni_exit = True
            return 0
        elif level == 0 and usr_in == '90':
            os.system('cls' if os.name == 'nt' else 'clear')
            print("Bye")
            time.sleep(4)
            os.system('cls' if os.name == 'nt' else 'clear')
            sys.exit()
        # if manu ---- lame --- will be nested or will return to menu 0 :S
        if level == 0:
            # Submenus are entered by recursive calls with the new level.
            if usr_in == '1':
                self.main_meni(1)
            if usr_in == '2':
                self.main_meni(2)
            if usr_in == '3':
                self.main_meni(3)
            if usr_in == '4':
                self.main_meni(4)
            # NOTE(review): unreachable -- '9' at level 0 already returned above.
            if usr_in == '9':
                self.save_and_exit()
            if usr_in == 'admin':
                self.main_meni(99)
        elif level == 1:
            if usr_in == '1':
                coins().search()
            if usr_in == '2':
                coins().rank()
            if usr_in == '3':
                balance(self.userid).print_user_balance()
            if usr_in == '4':
                balance(self.userid).add_coin_to_balance()
            if usr_in == '5':
                balance(self.userid).edit_coin_balance()
            if usr_in == '6':
                balance(self.userid).remove_coin_balance()
            if usr_in == '9':
                pass
        elif level == 2:
            if usr_in == '1':
                wallet_key(self.userid).list_wallets()
            if usr_in == '2':
                wallet_key(self.userid).add_wallet_public_key()
            if usr_in == '3':
                wallet_key(self.userid).delete_row_wallet()
            if usr_in == '4':
                wallet_key(self.userid).update_wallets()
            if usr_in == '5':
                wallet_key(self.userid).edit_wallet()
            if usr_in == '9':
                pass
        elif level == 3:
            if usr_in == '1':
                owerview().user_view(self.userid)
            if usr_in == '2':
                owerview().user_view(self.userid, True)
            if usr_in == '3':
                update(self.userid, True)
                pass
            if usr_in == '9':
                pass
        elif level == 4:
            # print("Settings\n__________")
            if usr_in == '1':
                # Create new user
                newUser()
            if usr_in == '2':
                newUser(False)._new_password(self.userid)
            if usr_in == '3':
                print(Fore.YELLOW + "Currently we use:\nFor coin prices = coinmarketcap api\nFor BTC wallets = blockchain.info api\nFor ETH and ETH Tokens = etherscan.io")
                print(Fore.WHITE)
                sss = input("Press enter to return.")
                os.system('cls' if os.name == 'nt' else 'clear')
                pass
            if usr_in == '9':
                pass
        elif level == 99:
            # Admin menu: all entries are placeholders for now.
            if usr_in == '1':
                # Quick preview
                pass
            if usr_in == '2':
                # Detail view"
                pass
            if usr_in == '3':
                # Global stats
                pass
            if usr_in == '4':
                # auto_update_toggle()
                pass
            if usr_in == '9':
                pass
        else:
            print(Fore.RED + 'Option not existing.')
            pass

    def uvod(self):
        """Print the coloured startup banner."""
        print(Fore.CYAN + ' Kripto.info v-0.1 ')
        print(Fore.MAGENTA + "________________________________________\n")
        print(Fore.CYAN + " ** * * *** *** * *** ")
        print(" * * * * * * * ")
        print(" * * * *** *** * ** ")
        print(" * * * * * * * ")
        print(" *** ** * * *** *** ")
        print(Fore.MAGENTA + "________________________________________")
        print(Fore.RED + " !!! NEVER SAVE PRIVATE KEYS !!!")
        print(Fore.CYAN + ' ')

    def save_and_exit(self, logout=True):
        """Clear the screen and either log out (return 9) or hard-exit.

        :param logout: True = logout path (returns); False = sys.exit().
        """
        if logout:
            os.system('cls' if os.name == 'nt' else 'clear')
            print("\nLoged out!\n")
            time.sleep(2)
            # sys.exit()
            os.system('cls' if os.name == 'nt' else 'clear')
            print(Style.RESET_ALL)
            return 9
        else:
            os.system('cls' if os.name == 'nt' else 'clear')
            print("Program interupted.\n Sys exit in place")
            print(Style.RESET_ALL)
            time.sleep(1)
            sys.exit()
| unlicense |
benjamindeleener/odoo | addons/mass_mailing/controllers/web_editor.py | 45 | 1343 | # -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.addons.web_editor.controllers.main import Web_Editor
class Web_Editor(Web_Editor):
    """Mass-mailing overrides of the web_editor field controllers.

    Each HTML route forces the snippets bundle and rendering template used
    by mass mailing, then delegates to the base controller method.
    """

    @http.route(["/website_mass_mailing/field/popup_content"], type='http', auth="user")
    def mass_mailing_FieldTextHtmlPopupTemplate(self, model=None, res_id=None, field=None, callback=None, **kwargs):
        # Popup editor: website snippet palette + popup content template.
        kwargs.update(
            snippets='/website/snippets',
            template='mass_mailing.FieldTextHtmlPopupContent',
        )
        return self.FieldTextHtml(model, res_id, field, callback, **kwargs)

    @http.route('/mass_mailing/field/email_template', type='http', auth="user")
    def mass_mailing_FieldTextHtmlEmailTemplate(self, model=None, res_id=None, field=None, callback=None, **kwargs):
        # Email designer: mass-mailing snippet set + inline template.
        kwargs.update(
            snippets='/mass_mailing/snippets',
            template='mass_mailing.FieldTextHtmlInline',
        )
        return self.FieldTextHtmlInline(model, res_id, field, callback, **kwargs)

    @http.route(['/mass_mailing/snippets'], type='json', auth="user", website=True)
    def mass_mailing_snippets(self):
        # Render the snippet list for the current user's company.
        current_user = request.env['res.users'].browse(request.uid)
        values = {'company_id': current_user.company_id}
        return request.registry["ir.ui.view"].render(request.cr, request.uid, 'mass_mailing.email_designer_snippets', values, context=request.context)
| gpl-3.0 |
ProteusCortex/vimrc-old | .ycm_extra_conf.py | 1 | 6291 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
# BUG FIX: the original read "'-isystem', './lib'" with no trailing comma,
# so Python concatenated the adjacent string literals into the single bogus
# flag './lib-isystem' and silently dropped the intended entries.
'-isystem', './lib',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
# Commonly used project structure
'-I.',
'-I', 'include',
'-I', 'lib'
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Absolute path of the directory containing this configuration file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include paths rooted at
  *working_directory*.

  Handles both the separated form ("-I", "include") and the attached form
  ("-Iinclude").  Without a working directory, the flags are returned
  unchanged (as a fresh list).  Empty flags are dropped.
  """
  if not working_directory:
    return list( flags )

  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for flag in flags:
    updated = flag

    if next_is_path:
      next_is_path = False
      if not flag.startswith( '/' ):
        updated = os.path.join( working_directory, flag )

    for prefix in path_flags:
      if flag == prefix:
        # Separated form: the *next* flag is the path to rewrite.
        next_is_path = True
        break
      if flag.startswith( prefix ):
        # Attached form: rewrite the path embedded in this flag.
        # (os.path.join leaves already-absolute paths untouched.)
        updated = prefix + os.path.join( working_directory,
                                         flag[ len( prefix ): ] )
        break

    if updated:
      absolute_flags.append( updated )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* has a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  # NOTE(review): only called when the module-level `database` is set
  # (see FlagsForFile), so `database` is never None here.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file with flags was found for this header.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  # Entry point called by YouCompleteMe: returns the clang flags (and cache
  # policy) for *filename*, from the compilation database when configured,
  # otherwise from the hard-coded `flags` list rooted at this script's dir.
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.