repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
10clouds/django-federated-login | federated_login/patches.py | 1 | 6210 | # Patches openid's Consumer to function with Google Apps.
#
# Google Apps returns claimed_id values according to a working draft, but these
# are not part of the current 2.0 specification.
#
# See also:
# https://github.com/openid/python-openid/pull/39
# https://sites.google.com/site/oauthgoog/fedlogininterp/openiddiscovery
import copy
from urlparse import urldefrag
from openid.consumer import consumer
from openid.consumer.discover import OpenIDServiceEndpoint, OPENID_2_0_TYPE
from openid.message import OPENID2_NS, OPENID1_NS, no_default
from openid import oidutil
# decorator by Guido
# http://mail.python.org/pipermail/python-dev/2008-January/076194.html
def monkeypatch_method(cls):
    """Decorator factory that attaches the decorated function to *cls*.

    The function is returned unchanged, so it remains callable on its own.
    (Idea by Guido:
    http://mail.python.org/pipermail/python-dev/2008-January/076194.html)
    """
    def _attach(func):
        setattr(cls, func.__name__, func)
        return func
    return _attach
@monkeypatch_method(consumer.GenericConsumer)
def _verifyDiscoveryResultsOpenID2(self, resp_msg, endpoint):
    """Patched GenericConsumer._verifyDiscoveryResultsOpenID2.

    Identical to the upstream python-openid implementation except that a
    Google Apps claimed_id is rewritten to the user-xrds discovery URL
    before re-discovery, so assertions from Google Apps OPs can verify.

    @param resp_msg: the signed assertion response message
    @param endpoint: the pre-discovered OpenIDServiceEndpoint, or None
    @return: the verified OpenIDServiceEndpoint
    @raises consumer.ProtocolError: on identifier mismatches
    @raises KeyError: when openid.op_endpoint is absent (via no_default)
    """
    to_match = OpenIDServiceEndpoint()
    to_match.type_uris = [OPENID_2_0_TYPE]
    to_match.claimed_id = resp_msg.getArg(OPENID2_NS, 'claimed_id')
    to_match.local_id = resp_msg.getArg(OPENID2_NS, 'identity')

    # Raises a KeyError when the op_endpoint is not present
    to_match.server_url = resp_msg.getArg(
        OPENID2_NS, 'op_endpoint', no_default)

    # claimed_id and identifier must both be present or both
    # be absent
    if (to_match.claimed_id is None and
            to_match.local_id is not None):
        raise consumer.ProtocolError(
            'openid.identity is present without openid.claimed_id')
    elif (to_match.claimed_id is not None and
            to_match.local_id is None):
        raise consumer.ProtocolError(
            'openid.claimed_id is present without openid.identity')
    # This is a response without identifiers, so there's really no
    # checking that we can do, so return an endpoint that's for
    # the specified `openid.op_endpoint'
    elif to_match.claimed_id is None:
        return OpenIDServiceEndpoint.fromOPEndpointURL(to_match.server_url)

    # The claimed ID doesn't match, so we have to do discovery
    # again. This covers not using sessions, OP identifier
    # endpoints and responses that didn't match the original
    # request.
    # Google Apps patch: discovery for a Google Apps identity must go
    # through the user-xrds endpoint rather than the raw claimed_id.
    if to_match.server_url.startswith(u'https://www.google.com/a/'):
        import urllib
        claimed_id = u'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(to_match.claimed_id)
    else:
        claimed_id = to_match.claimed_id

    if not endpoint:
        oidutil.log('No pre-discovered information supplied.')
        endpoint = self._discoverAndVerify(claimed_id, [to_match])
    else:
        # The claimed ID matches, so we use the endpoint that we
        # discovered in initiation. This should be the most common
        # case.
        try:
            self._verifyDiscoverySingle(endpoint, to_match)
        except consumer.ProtocolError, e:
            oidutil.log(
                "Error attempting to use stored discovery information: " +
                str(e))
            oidutil.log("Attempting discovery to verify endpoint")
            endpoint = self._discoverAndVerify(
                claimed_id, [to_match])

    # The endpoint we return should have the claimed ID from the
    # message we just verified, fragment and all.
    if endpoint.claimed_id != to_match.claimed_id:
        endpoint = copy.copy(endpoint)
        endpoint.claimed_id = to_match.claimed_id
    return endpoint
@monkeypatch_method(consumer.GenericConsumer)
def _verifyDiscoverySingle(self, endpoint, to_match):
    """Verify that the given endpoint matches the information
    extracted from the OpenID assertion, and raise an exception if
    there is a mismatch.

    Patched to translate Google Apps claimed_id / local_id values to the
    user-xrds discovery URL before comparison, matching what discovery
    against a Google Apps domain actually returns.

    @type endpoint: openid.consumer.discover.OpenIDServiceEndpoint
    @type to_match: openid.consumer.discover.OpenIDServiceEndpoint

    @rtype: NoneType

    @raises consumer.ProtocolError: when the endpoint does not match the
        discovered information.
    """
    # Every type URI that's in the to_match endpoint has to be
    # present in the discovered endpoint.
    for type_uri in to_match.type_uris:
        if not endpoint.usesExtension(type_uri):
            raise consumer.TypeURIMismatch(type_uri, endpoint)

    # Fragments do not influence discovery, so we can't compare a
    # claimed identifier with a fragment to discovered information.
    if to_match.server_url.startswith(u'https://www.google.com/a/'):
        import urllib
        claimed_id = u'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(to_match.claimed_id)
    else:
        claimed_id = to_match.claimed_id
    defragged_claimed_id, _ = urldefrag(claimed_id)
    if defragged_claimed_id != endpoint.claimed_id:
        raise consumer.ProtocolError(
            'Claimed ID does not match (different subjects!), '
            'Expected %s, got %s' %
            (defragged_claimed_id, endpoint.claimed_id))

    # NOTE(review): the Google branch reads to_match.local_id directly
    # (urllib.quote_plus would raise if it were None) while the generic
    # branch uses getLocalID(); confirm local_id is always set here.
    if to_match.server_url.startswith(u'https://www.google.com/a/'):
        import urllib
        local_id = u'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(to_match.local_id)
    else:
        local_id = to_match.getLocalID()
    if local_id != endpoint.getLocalID():
        raise consumer.ProtocolError('local_id mismatch. Expected %s, got %s' %
                                     (local_id, endpoint.getLocalID()))

    # If the server URL is None, this must be an OpenID 1
    # response, because op_endpoint is a required parameter in
    # OpenID 2. In that case, we don't actually care what the
    # discovered server_url is, because signature checking or
    # check_auth should take care of that check for us.
    if to_match.server_url is None:
        assert to_match.preferredNamespace() == OPENID1_NS, (
            """The code calling this must ensure that OpenID 2
            responses have a non-none `openid.op_endpoint' and
            that it is set as the `server_url' attribute of the
            `to_match' endpoint.""")
    elif to_match.server_url != endpoint.server_url:
        raise consumer.ProtocolError('OP Endpoint mismatch. Expected %s, got %s' %
                                     (to_match.server_url, endpoint.server_url))
| mit |
VitalPet/partner-contact | passport/tests/test_passport.py | 34 | 6735 | # -*- encoding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.tests.common import TransactionCase
from openerp.models import BaseModel
from datetime import date
class Base_Test_passport(TransactionCase):
    """
    Simple test creating a passport.

    This is a base class for passport test cases.
    Inherit from this and setup values.
    """
    def setUp(self, vals=None):
        """
        Create a passport record from default values merged with *vals*.

        :param vals: optional dict of field values overriding the defaults
                     below (used by subclasses).
        """
        if vals is None:
            vals = {}
        # Default test values
        self.vals = {'name': 'This is a test passport name',
                     'number': 'A200124789',
                     'country_id': 1,
                     'expiration_date': date(2013, 11, 14),
                     'birth_date': date(1980, 11, 21),
                     'gender': 'male',
                     }
        super(Base_Test_passport, self).setUp()
        # Overwrite vals if needed.  dict.update() replaces the Python-2-only
        # dict(self.vals.items() + vals.items()) merge with the same result.
        self.vals.update(vals)
        # Create the passport object; we will be testing this, so store in self
        res_passport = self.registry('res.passport')
        self.passport_id = res_passport.create(
            self.cr, self.uid, self.vals, context=None)

    def test_passport(self):
        """
        Check that every field of the created passport matches self.vals.
        """
        res_passport = self.registry('res.passport')
        passport_obj = res_passport.browse(
            self.cr, self.uid, self.passport_id, context=None)
        for field in self.vals:
            val = passport_obj[field]
            if isinstance(val, BaseModel):
                # Relational fields are compared by database id.
                self.assertEqual(self.vals[field], val.id,
                                 "IDs for %s don't match: (%i != %i)" %
                                 (field, self.vals[field], val.id))
            else:
                self.assertEqual(str(self.vals[field]), str(val),
                                 "Values for %s don't match: (%s != %s)" %
                                 (field, str(self.vals[field]), str(val)))
class Test_passport_bad(Base_Test_passport):
    """
    Simple test creating a passport, test against bad values
    """
    def setUp(self):
        """
        Setting up passport, then changing the values to test against.
        """
        super(Test_passport_bad, self).setUp()
        # The record was created with the defaults; now swap in deliberately
        # wrong expectations so every comparison must fail.
        self.vals = {
            'name': 'This is the wrong passport name',
            'number': 'A111111111',
            'country_id': 0,
            'expiration_date': date(1999, 11, 14),
            'birth_date': date(1999, 11, 21),
            'gender': '',
        }

    def test_passport(self):
        """
        Checking the passport creation, assertions should all be false.
        """
        registry = self.registry('res.passport')
        record = registry.browse(
            self.cr, self.uid, self.passport_id, context=None)
        for name in self.vals:
            stored = record[name]
            if isinstance(stored, BaseModel):
                self.assertNotEqual(self.vals[name], stored.id,
                                    "IDs for %s don't match: (%i != %i)" %
                                    (name, self.vals[name], stored.id))
            else:
                self.assertNotEqual(str(self.vals[name]), str(stored),
                                    "Values for %s don't match: (%s != %s)" %
                                    (name, str(self.vals[name]), str(stored)))
class Test_passport_name_get(TransactionCase):
    """
    Test name_get
    """
    def setUp(self):
        """
        Setting up passports with name, country, either and none,
        to exercise every branch of name_get().
        """
        super(Test_passport_name_get, self).setUp()
        res_passport = self.registry('res.passport')
        res_country = self.registry('res.country')
        country = res_country.browse(self.cr, self.uid, 1, context=None)
        self.name_on_passport = 'test name'
        self.country_name = country.name_get()[0][1]
        # Passport with both a holder name and a country.
        self.both = res_passport.create(
            self.cr, self.uid, {'name': self.name_on_passport,
                                'country_id': country.id, },
            context=None)
        # Passport with only a holder name.
        self.name_only = res_passport.create(
            self.cr, self.uid, {'name': self.name_on_passport, },
            context=None)
        # Passport with only a country.
        self.country_only = res_passport.create(
            self.cr, self.uid, {'country_id': country.id, },
            context=None)
        # Passport with neither.
        self.neither = res_passport.create(
            self.cr, self.uid, {},
            context=None)

    def test_passport(self):
        """
        Check that name_get() renders 'country | name', falls back to
        whichever part is present, and yields '' when both are missing.
        """
        res_passport = self.registry('res.passport')
        both_obj = res_passport.browse(
            self.cr, self.uid, self.both, context=None)
        name_only = res_passport.browse(
            self.cr, self.uid, self.name_only, context=None)
        country_only = res_passport.browse(
            self.cr, self.uid, self.country_only, context=None)
        neither = res_passport.browse(
            self.cr, self.uid, self.neither, context=None)
        self.assertEquals(
            both_obj.name_get()[0][1],
            ' | '.join((self.country_name, self.name_on_passport)),
            'Error in passport name_get() with both country name and name on '
            'passport.'
        )
        self.assertEquals(
            name_only.name_get()[0][1], self.name_on_passport,
            'Error in passport name_get() with only name on passport.'
        )
        self.assertEquals(
            country_only.name_get()[0][1], self.country_name,
            'Error in passport name_get() with only name of country.'
        )
        self.assertEquals(
            neither.name_get()[0][1], '',
            'Error in passport name_get() with neither country name nor name '
            'on passport.'
        )
| agpl-3.0 |
Sult/Mercenaries | mail/models.py | 1 | 2537 | from django.db import models
from characters.models import Character
from collections import namedtuple
class MailFolder(models.Model):
    """ folders to hold and sort messages
        Inbox, sent and thrash

    A folder belongs to one character; (character, name) is unique.
    Locked folders are the standard ones created below.
    """
    character = models.ForeignKey(Character)
    name = models.CharField(max_length=31)
    # NOTE(review): no default given; recent Django versions require an
    # explicit default on BooleanField -- confirm before upgrading.
    locked = models.BooleanField()

    class Meta:
        unique_together = ['character', 'name']

    def __unicode__(self):
        return "%s: %s" % (self.character, self.name)

    @staticmethod
    def create_standard_folders(character):
        """Create the four standard, locked folders for *character*."""
        folders = ["inbox", "archive", "sent", "trash"]
        for folder in folders:
            new_folder = MailFolder(
                character=character,
                name=folder,
                locked=True,
            )
            new_folder.save()

    #order folders for mail view
    @staticmethod
    def order_folders(character):
        """Return the character's folders as a list for the mail view."""
        # list() replaces the manual append loop; same queryset, same order.
        return list(MailFolder.objects.filter(character=character))
#basic message
class Mail(models.Model):
    """ message composition

    A single mail message stored in a MailFolder.
    """
    # Message categories stored in `category`.
    PLAYER = "player"
    PROMOTION = "promotion"
    NOTIFICATION = "notification"
    INVITATION = "invitation"
    BUSTOUT = "bustout"
    ALLIANCE = "alliance"
    MAILCATEGORIES = (
        (PLAYER, "player"),
        (PROMOTION, "promotion"),
        (NOTIFICATION, "notification"),
        (INVITATION, "invitation"),
        (BUSTOUT, "bustout"),
        (ALLIANCE, "alliance"),
    )

    folder = models.ForeignKey(MailFolder)
    # sender is nullable -- presumably for system-generated mail; verify.
    sender = models.ForeignKey(Character, related_name="+", null=True)
    to = models.ForeignKey(Character, related_name="+")
    read = models.BooleanField(default=False)
    sent_at = models.DateTimeField(auto_now_add=True)
    category = models.CharField(max_length=15, choices=MAILCATEGORIES)
    subject = models.CharField(max_length=127)
    body = models.TextField()

    def __unicode__(self):
        return "%s: %s: %s" % (self.to, self.subject, self.read)

    def reply_format(self):
        """Return the body quoted with '> ' plus a To/From/Date header."""
        message = self.body
        message = "> " + message
        message = message.replace("\r\n", "\r\n> ")
        send_information = "\r\n\r\n> To: %s\r\n> From: %s\r\n> Date: %s\r\n>\r\n" % (self.to, self.sender, self.view_sent_at())
        message = send_information + message
        return message

    #format sent_at date
    def view_sent_at(self):
        """Return sent_at formatted as 'HH:MM dd-mm-YYYY'."""
        return self.sent_at.strftime("%H:%M %d-%m-%Y")

    #send mail
    @staticmethod
    def send_mail(to_character, category, subject, body):
        """Create and save a mail in the recipient's inbox folder."""
        new_mail = Mail(
            folder=MailFolder.objects.get(character=to_character, name="inbox"),
            category=category,
            to=to_character,
            subject=subject,
            body=body,
        )
        new_mail.save()
| gpl-2.0 |
liorvh/infernal-twin | build/pillow/build/scripts-2.7/pilconvert.py | 9 | 2357 | #!/usr/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import getopt
import string
import sys
from PIL import Image
def usage():
    """Print the pilconvert help text to stdout and exit with status 1."""
    lines = [
        "PIL Convert 0.5/1998-12-30 -- convert image files",
        "Usage: pilconvert [option] infile outfile",
        "",
        "Options:",
        "",
        " -c <format> convert to format (default is given by extension)",
        "",
        " -g convert to greyscale",
        " -p convert to palette image (using standard palette)",
        " -r convert to rgb",
        "",
        " -o optimize output (trade speed for size)",
        " -q <value> set compression quality (0-100, JPEG only)",
        "",
        " -f list supported file formats",
    ]
    print("\n".join(lines))
    sys.exit(1)
# --- Command-line entry point (this file is a script; runs at import) ---
if len(sys.argv) == 1:
    usage()

try:
    opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
    print(v)
    sys.exit(1)

output_format = None   # target format from -c, else inferred from extension
convert = None         # target mode ("L", "P" or "RGB") when requested
options = {}           # extra keyword options forwarded to Image.save()

for o, a in opt:
    if o == "-f":
        # List all registered formats; '*' marks formats that can be saved.
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats (* indicates output format):")
        for i in id:
            if i in Image.SAVE:
                print(i+"*", end=' ')
            else:
                print(i, end=' ')
        sys.exit(1)
    elif o == "-c":
        output_format = a
    # NOTE(review): plain `if` breaks the elif chain here; harmless since
    # o cannot equal both "-c" and "-g", but `elif` would be clearer.
    if o == "-g":
        convert = "L"
    elif o == "-p":
        convert = "P"
    elif o == "-r":
        convert = "RGB"
    elif o == "-o":
        options["optimize"] = 1
    elif o == "-q":
        # string.atoi is Python 2 only; int(a) is the modern spelling.
        options["quality"] = string.atoi(a)

if len(argv) != 2:
    usage()

try:
    im = Image.open(argv[0])
    if convert and im.mode != convert:
        # Draft mode lets the decoder do a cheap approximate conversion
        # before the exact convert() call.
        im.draft(convert, im.size)
        im = im.convert(convert)
    if output_format:
        im.save(argv[1], output_format, **options)
    else:
        im.save(argv[1], **options)
except:
    # NOTE(review): bare except deliberately collapses any failure into a
    # short message, but it also swallows KeyboardInterrupt/SystemExit.
    print("cannot convert image", end=' ')
    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| gpl-3.0 |
rcarmo/fabric-templates | postgres-solr-redis-cluster/fabfile/config.py | 1 | 2325 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Configuration
This file contains shared constants/dictionaries for the fabfile
Created by: Rui Carmo
"""
# copy our configuration files to a specific location and set permissions
# (this is a sample for an old Postgres deployment)
# copy our configuration files to a specific location and set permissions
# (this is a sample for an old Postgres deployment)
# Each entry: path to install, owner (user:group), octal perms, and an
# optional 'recursive' flag for directories.
skel = {
    'postgres': [{
        'path': '/etc/postgresql/9.2',
        'owner': 'postgres:postgres',
        'perms': '0644',
        'recursive': True
    }, {
        'path': '/etc/postgresql/9.2/main/pg_hba.conf',
        'owner': 'postgres:postgres',
        'perms': '0644'
    }, {
        'path': '/etc/postgresql/9.2/main/pg_ident.conf',
        'owner': 'postgres:postgres',
        'perms': '0640'
    }]
}

# External APT repositories
# these are the ones I commonly use for Postgres and Redis on Debian
# Each entry carries the GPG key name/URL and the sources.list line.
repos = {
    # Postgres repo
    "pgdg": {
        "key_name" : "PostgreSQL Debian Repository",
        "key_url" : "http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc",
        "source_file": "deb http://apt.postgresql.org/pub/repos/apt/ wheezy-pgdg main"
    },
    # Redis repo
    "dotdeb": {
        "key_name" : "dotdeb.org",
        "key_url" : "http://www.dotdeb.org/dotdeb.gpg",
        "source_file": "deb http://mirrors.fe.up.pt/dotdeb/ wheezy all"
    }
}

# Package groups I usually deploy on servers
# "pip" entries are pinned versions installed via pip; the rest are APT.
packages = {
    "base" : ['vim', 'htop', 'tmux', 'wget', 'netcat', 'rsync', 'bmon', 'speedometer', 'jpegoptim', 'imagemagick'],
    "postgres": ['postgresql-9.2', 'postgresql-client-9.2', 'libpq-dev'],
    "redis" : ['redis-server'],
    "python" : ['python2.7-dev', 'libevent-dev', 'python-setuptools'],
    "java" : ['openjdk-7-jre-headless'],
    "pip" : [
        "gunicorn==0.17.4",
        "gevent==0.13.8",
        "psycopg2==2.5",
        "Pygments==1.6",
        "celery-with-redis==3.0",
        "nose==1.3.0",
        "flower==0.5.1"
    ]
}

# Tarballs fetched and unpacked into 'target' (Solr / ZooKeeper cluster).
tarballs = {
    "solr": {
        'url' : 'http://mirrors.fe.up.pt/pub/apache/lucene/solr/4.4.0/solr-4.4.0.tgz',
        'target': '/srv'
    },
    "zookeeper": {
        'url' : 'http://mirrors.fe.up.pt/pub/apache/zookeeper/zookeeper-3.4.5/zookeeper-3.4.5.tar.gz',
        'target': '/srv'
    }
}

# Files written verbatim onto the remote hosts (path -> content).
configuration_files = {
    "/etc/profile.d/ourenv.sh": 'export OURSETTING1=OURVALUE1\nexport OURSETTING2=OURVALUE2'
}
| mit |
alexryndin/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/params.py | 1 | 18188 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import os
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries import functions
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
stack_name = default("/hostLevelParams/stack_name", None)
# node hostname
hostname = config["hostname"]
# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version = format_stack_version(stack_version_unformatted)
stack_is_21 = False
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
# It cannot be used during the initial Cluser Install because the version is not yet known.
version = default("/commandParams/version", None)
# current host stack version
current_version = default("/hostLevelParams/current_version", None)
# Upgrade direction
upgrade_direction = default("/commandParams/upgrade_direction", None)
# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
# downgrade_from_version provides the source-version the downgrade is happening from
downgrade_from_version = default("/commandParams/downgrade_from_version", None)
component_directory = status_params.component_directory
hadoop_bin_dir = "/usr/bin"
hadoop_home = '/usr'
hive_bin = '/usr/lib/hive/bin'
hive_lib = '/usr/lib/hive/lib'
#Hbase params keep hbase lib here,if not,mapreduce job doesn't work for hive.
hbase_lib = '/usr/iop/current/hbase-client/lib'
# Hadoop params
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_home = '/usr/iop/current/hadoop-client'
hive_bin = format('/usr/iop/current/{component_directory}/bin')
hive_lib = format('/usr/iop/current/{component_directory}/lib')
hive_var_lib = '/var/lib/hive'
# if this is a server action, then use the server binaries; smoke tests
# use the client binaries
command_role = default("/role", "")
# Map server roles to their /usr/iop/current symlink so server processes
# resolve hive_bin/hive_lib to the server-side installation.
server_role_dir_mapping = { 'HIVE_SERVER' : 'hive-server2',
                            'HIVE_METASTORE' : 'hive-metastore' }

if command_role in server_role_dir_mapping:
  hive_server_root = server_role_dir_mapping[command_role]
  hive_bin = format('/usr/iop/current/{hive_server_root}/bin')
  hive_lib = format('/usr/iop/current/{hive_server_root}/lib')
hive_specific_configs_supported = False
hive_etc_dir_prefix = "/etc/hive"
limits_conf_dir = "/etc/security/limits.d"
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
# use the directories from status_params as they are already calculated for
# the correct version of BigInsights
hadoop_conf_dir = status_params.hadoop_conf_dir
hadoop_bin_dir = status_params.hadoop_bin_dir
webhcat_conf_dir = status_params.webhcat_conf_dir
hive_conf_dir = status_params.hive_conf_dir
hive_config_dir = status_params.hive_config_dir
hive_client_conf_dir = status_params.hive_client_conf_dir
hive_server_conf_dir = status_params.hive_server_conf_dir
hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
component_directory = status_params.component_directory
hadoop_home = '/usr/iop/current/hadoop-client'
hive_bin = format('/usr/iop/current/{component_directory}/bin')
hive_lib = format('/usr/iop/current/{component_directory}/lib')
# there are no client versions of these, use server versions directly
hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
# --- Tarballs ---
# DON'T CHANGE THESE VARIABLE NAMES
# Values don't change from those in copy_tarball.py
hive_tar_source = "/usr/iop/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
pig_tar_source = "/usr/iop/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
hive_tar_dest_file = "/iop/apps/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
pig_tar_dest_file = "/iop/apps/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
hadoop_streaming_tar_source = "/usr/iop/{0}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_VERSION_PATTERN)
sqoop_tar_source = "/usr/iop/{0}/sqoop/sqoop.tar.gz".format(STACK_VERSION_PATTERN)
hadoop_streaming_tar_dest_dir = "/iop/apps/{0}/mapreduce/".format(STACK_VERSION_PATTERN)
sqoop_tar_dest_dir = "/iop/apps/{0}/sqoop/".format(STACK_VERSION_PATTERN)
tarballs_mode = 0444
if Script.is_stack_greater_or_equal("4.1.0.0"):
# this is NOT a typo. BigInsights-4.1 configs for hcatalog/webhcat point to a
# specific directory which is NOT called 'conf'
hcat_conf_dir = '/usr/iop/current/hive-webhcat/etc/hcatalog'
config_dir = '/usr/iop/current/hive-webhcat/etc/webhcat'
if Script.is_stack_greater_or_equal("4.2.0.0"):
# need to set it to false if it is to downgrade from 4.2 to 4.1
if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_stack_version(version), '4.2.0.0') < 0:
hive_specific_configs_supported = False
else:
#means it's either an upgrade or a fresh install of 4.2
hive_specific_configs_supported = True
else: #BI 4.0
#still need to use current dir due to rolling upgrade restrictions
# --- Tarballs ---
webhcat_apps_dir = "/apps/webhcat"
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#HACK Temporarily use dbType=azuredb while invoking schematool
if hive_metastore_db_type == "mssql":
  hive_metastore_db_type = "azuredb"

#users
hive_user = config['configurations']['hive-env']['hive_user']

#JDBC driver jar name
# Map the configured JDBC driver class to the jar name placed in hive_lib
# and the symlink name used when downloading it from the Ambari server.
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
  jdbc_jar_name = "sqljdbc4.jar"
  jdbc_symlink_name = "mssql-jdbc-driver.jar"
elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
  jdbc_jar_name = "mysql-connector-java.jar"
  jdbc_symlink_name = "mysql-jdbc-driver.jar"
elif hive_jdbc_driver == "org.postgresql.Driver":
  jdbc_jar_name = "postgresql-jdbc.jar"
  jdbc_symlink_name = "postgres-jdbc-driver.jar"
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
  jdbc_jar_name = "ojdbc.jar"
  jdbc_symlink_name = "oracle-jdbc-driver.jar"
# NOTE(review): an unrecognized driver class leaves jdbc_jar_name /
# jdbc_symlink_name undefined; downstream format() calls would then fail.

check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver","org.postgresql.Driver","oracle.jdbc.driver.OracleDriver"]
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
prepackaged_ojdbc_symlink = format("{hive_lib}/ojdbc6.jar")
templeton_port = config['configurations']['webhcat-site']['templeton.port']
#common
hive_metastore_hosts = config['clusterHostInfo']['hive_metastore_host']
hive_metastore_host = hive_metastore_hosts[0]
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
hive_server_hosts = config['clusterHostInfo']['hive_server_host']
hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
if hive_transport_mode.lower() == "http":
hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
else:
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
# ssl options
hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
#hive_env
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_client_conf_dir]
if hostname in hive_metastore_hosts or hostname in hive_server_hosts:
hive_conf_dirs_list.append(hive_server_conf_dir)
#hive-site
hive_database_name = config['configurations']['hive-env']['hive_database_name']
hive_database = config['configurations']['hive-env']['hive_database']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
java_share_dir = '/usr/share/java'
driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
target = format("{hive_lib}/{jdbc_jar_name}")
# Where the Ambari server serves the JDK / JDBC driver artefacts from
jdk_location = config['hostLevelParams']['jdk_location']
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")

# Generated wrapper scripts used to start HiveServer2 and the metastore
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")

hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']

# Server roles (HiveServer2 / metastore) use the server heap setting;
# every other role falls back to the smaller client heap setting.
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
    hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
else:
    hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']

hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']

java64_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])

##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
# Helper scripts for creating/removing the Hive MySQL user
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")

######## Metastore Schema
# Explicit metastore schema initialisation is only done from stack 4.1.0.0 onwards
init_metastore_schema = False
if Script.is_stack_greater_or_equal("4.1.0.0"):
    init_metastore_schema = True

########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']

#hive-log4j.properties.template
# log4j payloads are optional: fall back to None so templating can be skipped
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
    log4j_props = config['configurations']['hive-log4j']['content']
else:
    log4j_props = None

#webhcat-log4j.properties.template
if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
    log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
else:
    log4j_webhcat_props = None

#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
    log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
    log4j_exec_props = None

daemon_name = status_params.daemon_name
process_name = status_params.process_name
hive_env_sh_template = config['configurations']['hive-env']['content']

hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0700 # Python 2 octal literal (rwx------)
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]

#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)

# Tez-related properties (disabled here; the original lookups are kept as comments)
tez_user = None #config['configurations']['tez-env']['tez_user']
# Tez jars
tez_local_api_jars = None #'/usr/lib/tez/tez*.jar'
tez_local_lib_jars = None #'/usr/lib/tez/lib/*.jar'
# Tez libraries
tez_lib_uris = None #default("/configurations/tez-site/tez.lib.uris", None)

# MySQL config file lives in a different place on Debian-family systems
if OSCheck.is_ubuntu_family():
    mysql_configname = '/etc/mysql/my.cnf'
else:
    mysql_configname = '/etc/my.cnf'

mysql_user = 'mysql'

# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']

mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
# NOTE(review): hive_database is presumably set earlier in this module;
# a value starting with 'Existing' means a pre-provisioned database is used.
hive_use_existing_db = hive_database.startswith('Existing')
hive_exclude_packages = []

# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
# trying to install mysql-connector-java upon them can cause packages to conflict.
if hive_use_existing_db:
    hive_exclude_packages = ['mysql-connector-java', 'mysql', 'mysql-server',
                             'mysql-community-release', 'mysql-community-server']
else:
    if 'role' in config and config['role'] != "MYSQL_SERVER":
        hive_exclude_packages = ['mysql', 'mysql-server', 'mysql-community-release',
                                 'mysql-community-server']
    if os.path.exists(mysql_jdbc_driver_jar):
        hive_exclude_packages.append('mysql-connector-java')

hive_site_config = dict(config['configurations']['hive-site'])

########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']

hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755 # Python 2 octal literal (rwxr-xr-x)
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755 # Python 2 octal literal (rwxr-xr-x)

#for create_hdfs_directory
security_param = "true" if security_enabled else "false"

hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']

import functools
#create partial functions with common arguments for every HdfsResource call
#to create hdfs directory we need to call params.HdfsResource in code
HdfsResource = functools.partial(
    HdfsResource,
    user = hdfs_user,
    security_enabled = security_enabled,
    keytab = hdfs_user_keytab,
    kinit_path_local = kinit_path_local,
    hadoop_bin_dir = hadoop_bin_dir,
    hadoop_conf_dir = hadoop_conf_dir,
    principal_name = hdfs_principal_name,
    hdfs_site = hdfs_site,
    default_fs = default_fs
)

# In a kerberised cluster, substitute this host into the server principal
if security_enabled:
    hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
| apache-2.0 |
Kate-Willett/HadISDH_Build | F10_MissingAdjUnc_AdjPlots.py | 1 | 30317 | # PYTHON 3
# >module load scitools/default-current
#
# Author: Kate Willett
# Created: 1 February 2013 (IDL)
# Last update: 1 December 2020 (Python 3 from 1st Dec 2020)
# Location: /home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code finds the missed adjustment uncertainty for each variable by looking at a gaussian fit to the distribution of adjustments
# and the actual distribution. The missed adjustment uncertainty is the standard deviation of the difference.
#
# This code looks at all of the adjustments allocated to all stations for a variable
# It plots a distribution of adjustment magnitude and a time series of adjustment frequency.(optional plot output)
# It also assesses the mean and standard deviation of the adjustments in absolute and actual space (also median)
# It also fits a gaussian to the distribution, adjusts it to include the 'fatter tails' and then
# takes the difference between the two to identify the 'missing adjustments / missing middle'
# The standard deviation of the missed adjustment distribution is taken as the 1 sigma uncertainty
# remaining in the data from missed adjustments
#
# Mean number of changepoints and distribution stats are output to file for read in by other programs
#
# Plots are output and a list of stations and adjustments from largest to smallest.
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# <List of program modules required to run the code, or link to compiler/batch file>
#
# -----------------------
# DATA
# -----------------------
# inlist: list of stations to work through
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/goodforHadISDH.'+version+'_PHAdpd.txt'
# NB - IF YOU RERUN LATER YOU WILL HAVE TO SWAP TO USING _KeptLarge.txt!!!
# inlog: list of adjustment locations and magnitudes for each station
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/HadISDH.landDPD.'+version+'_PHA.log'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Go through everything in the 'Start' section to make sure dates, versions and filepaths are up to date
# If we're using the Config file (F1_HadISDHBuildConfig.txt) then make sure that is correct.
#>./F10_submit_spice.bash - this will run all variables so you will have to comment out any you do not wish to run
#or
#>module load scitools/default-current # for Python 3
#>python F10_MissingAdjUnc_AdjPlots.py --var <var>
#
# <var> can be q, rh, t, td, tw, e, dpd
#
# -----------------------
# OUTPUT
# -----------------------
# outplots:
# /scratch/hadkw/UPDATE<YYYY>/IMAGES/BUILD/HadISDH.landDPD.'+versiondots+'_adjspread_PHA.eps'
# outadjs: list of stations and adjustments from largest to smallest
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/Largest_Adjs_landDPD.'+versiondots+'_PHA.txt'
# outstats: for each variable/homogtype a list of stats is output
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/Adjs_Stats.'+versiondots+.txt'
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 4 (1 December 2020)
# ---------
#
# Enhancements
#
# Changes
# This is now Python 3 rather than IDL
#
# Bug fixes
#
#
# Version 3 (6 February 2018)
# ---------
#
# Enhancements
# This program now finds the number of stations in the candidate variable station
# list so that it does not need to be put in manually before running.
# Next I should pull out the variable and homogtype so that it is stated
# at the command line rather than changed within the file before running.
#
# Now has param (variable) and homogtype called at the command line so you only need to edit the file once per year
#
# This now outputs a list of stats for each variable/homogtype to file (appends) so that other programs can read in
#
# This now has a 'KeptLarge' true/false switch to easily switch when running after the first initial run where you need
# to use the _KeptLarge.txt station lists
#
# Changes
#
# Bug fixes
#
# Version 2 (27 January 2017)
# ---------
#
# Enhancements
# General tidy up and move of all editables to the top
#
# Changes
#
# Bug fixes
#
# Version 1 (15 January 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
# USE python3
# module load scitools/default-current
# python F10_MissingAdjUnc_AdjPlots.py --var <var>
#
# For debugging
# ipython
# %pdb
# %run F10_MissingAdjUnc_AdjPlots.py <var>
#
# REQUIRES
# ReadNetCDF.py
#
#************************************************************************
# Set up python imports
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
#from lmfit import Model
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb
#import netCDF4 as nc4
from subprocess import check_output
from subprocess import call
#import ReadNetCDF
#from GetNiceTimes import MakeDaysSince
# Start and end years if HardWire = 1
styear = 1973
edyear = 2019

# Dataset version if HardWire = 1
versiondots = '4.2.0.2019f'
version = 'v420_2019f'
hadisdversiondots = '3.1.0.2019f'
hadisdversion = 'v310_2019f'

# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0

if (HardWire == 0):
    #' Read in the config file to get all of the info
    with open('F1_HadISDHBuildConfig.txt') as f:
        ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
    versiondots = ConfigDict['VersionDots']
    hadisdversiondots = ConfigDict['HadISDVersionDots']
    # NOTE(review): these are read as strings; code below casts with int() where needed
    styear = ConfigDict['StartYear']
    edyear = ConfigDict['EndYear']

# Do you want to run for plots only - not output stats?
PlotOnly = False # False = plots and output stats, True = plots only

# Growth Factor for Pseudomax of Gaussian Fitting
GrowthFactor = 1.5 # Tried 1.2, bit small

# Do you want to run for a second time (e.g., for PlotOnly = True) where station lists have already been saved as _KeptLarge.txt?
KeptLarge = False # TRUE (run with _KeptLarge station lists or FALSE (normal, first time run)

if (KeptLarge):
    KL = '_KeptLarge'
else:
    KL = ''

# Set up directories locations
updateyy = str(edyear)[2:4]
updateyyyy = str(edyear)
workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
#workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+updateyyyy
OUTPLOTDIR = workingdir+'/IMAGES/BUILD/'

# Set up filenames
INDIR = workingdir+'/LISTS_DOCS/'
OUTPUTLOG = workingdir+'/LISTS_DOCS/OutputLogFile'+versiondots+'.txt'
#OUTSTATS = workingdir+'/LISTS_DOCS/Adjs_Stats.'+versiondots+'.txt'

# Set up variables
MDI = -1e+30 # missing data indicator

#*** at some point add all the header info from the new HadISD files***
# Dictionaries for param, units, homogtype, binsize
# var -> [long name, units, operational homogtype, alternative homogtype, histogram bin width]
ParamDict = dict([('q',['q','g/kg','IDPHA','PHA', 0.05]),
                  ('rh',['RH','%rh','IDPHA','PHA', 0.5]),
                  ('t',['T','deg C','IDPHA','PHA', 0.05]), # Note this needs to be changed to IDPHAMG later
                  ('td',['Td','deg C','PHADPD','PHA', 0.1]),
                  ('tw',['Tw','deg C','IDPHA','PHA', 0.05]),
                  ('e',['e','hPa','IDPHA','PHA', 0.05]),
                  ('dpd',['DPD','deg C','PHA','PHA', 0.1])])
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee):
    """Read a fixed-width formatted text file into a structured numpy array.

    FileName: path of the file to read.
    typee:    list of numpy dtype strings, one per column (e.g. "|U6", "int").
    delimee:  list of integer field widths, one per column.

    Returns a structured array whose fields default to the names f0...fN.
    latin-1 encoding is used because station-list names may contain
    non-ASCII bytes that would break a strict utf-8 decode.
    """
    # (was three separate string-literal statements: only the first of those
    # was a real docstring, the others were discarded no-op expressions)
    return np.genfromtxt(FileName, dtype=typee, delimiter=delimee, encoding='latin-1')
#************************************************************************
# GAUSSIAN
def Gaussian(x, amp, cen, wid):
    """Gaussian model used for curve fitting.

    x   : point(s) on the x axis (scalar or numpy array)
    amp : peak amplitude
    cen : mean (centre of the peak)
    wid : standard deviation

    Returns amp * exp(-(x-cen)^2 / (2*wid^2)), element-wise for arrays.
    """
    # np.exp replaces the `exp` pulled in via `from scipy import pi,sqrt,exp`:
    # that scipy alias was deprecated and has been removed in current SciPy,
    # and np.exp works element-wise on arrays, which curve_fit relies on.
    return amp * np.exp(-(x-cen)**2 / (2*wid**2))
#****************************************************************************
# GETADJUSTMENTARRAYS
def GetAdjustmentArrays(INFIL, MCount, SCount, HType, WMOList, WBANList):
    """Read the homogenisation log and gather every adjustment for every station.

    Inputs:
      INFIL    = string filepath of the adjustment log to grep through
      MCount   = integer number of months in the full time series
      SCount   = integer number of stations in the dataset
      HType    = homogenisation type: 'PHA', 'IDPHAMG', 'PHADPD', else IDPHA-style
      WMOList  = list of 6-character WMO ID strings
      WBANList = list of 5-character WBAN ID strings
    Returns:
      GAdj_Locs       = int array (MCount) counting changepoints at each month
      GAdj_Mags_Accum = list of accumulated adjustment magnitudes as read from file
      GAdj_Mags_Act   = list of actual (de-accumulated) adjustment magnitudes
      GAdj_WMOs       = WMO ID for each entry in the magnitude lists
      GAdj_WBANs      = WBAN ID for each entry in the magnitude lists
    """
    GAdj_Locs = np.repeat(0,MCount) # changepoint count per month across all stations
    GAdj_Mags_Accum = [] # grown on the fly
    GAdj_Mags_Act = []
    GAdj_WMOs = []
    GAdj_WBANs = []
    print(INFIL)
    # loop through station by station to get all of the adjustments and locations
    for st in range(SCount):
        # find homog adj for this station; log layout depends on homogtype
        if (HType == 'PHA'):
            #PHA - 0=ID, 3=stmon, 6=edmon, 8=ibreak, 9=cbreak, 10=adj, 11=eadj
            edmonget = 6
            adjget = 10
            moo = check_output(['grep','-a','^Adj write:'+WMOList[st]+WBANList[st],INFIL])
            # grep output is bytes; splitting on the prefix leaves a leading ''
            moo = moo.decode("utf-8").split('Adj write:')
            moo.remove('')
        elif (HType == 'IDPHAMG') or (HType == 'PHADPD'):
            #IDPHAMG - 0=ID, 2=stmon, 3=edmon, 6=adj, 7=eadj, 8=adj source indicator
            edmonget = 3
            adjget = 6
            moo = check_output(['grep','-a','^'+WMOList[st]+WBANList[st],INFIL])
            # these records end with a bare newline (no trailing space)
            moo = moo.decode("utf-8").split('\n')
            moo.remove('')
        else:
            #IDPHA - 0=ID, 2=stmon, 3=edmon, 6=adj, 7=eadj
            edmonget = 3
            adjget = 6
            moo = check_output(['grep','-a','^'+WMOList[st]+WBANList[st],INFIL])
            # these records end with ' \n' (trailing space before the newline)
            moo = moo.decode("utf-8").split(' \n')
            moo.remove('')
        # Strip newlines and stray marker letters (b, i, p appear in IDPHAMG)
        # from the ends of each record, then tokenise on whitespace
        moo = [i.strip(' ABCDEFGHIJKLMNOPQRSTUVWXYZbip\n').split() for i in moo]
        # Now loop through the adjustments to append to the arrays
        AdjVals = []
        # Ignore first line as this is the most recent period so adjustment is 0
        for rec,adjstr in enumerate(moo[1:]):
            # plain float()/int(): the np.float/np.int aliases were removed in NumPy 1.24
            Adj = -(float(adjstr[adjget]))
            # Break location (log months are 1-based)
            Loc = int(adjstr[edmonget]) - 1
            GAdj_Locs[Loc] += 1 # increment this location by 1
            GAdj_Mags_Accum.append(Adj)
            AdjVals.append(Adj)
            GAdj_WMOs.append(WMOList[st])
            GAdj_WBANs.append(WBANList[st])
            # De-accumulate to get the actual adjustment applied at this break
            if (rec == 0):
                GAdj_Mags_Act.append(AdjVals[0])
            else:
                GAdj_Mags_Act.append(AdjVals[rec] - AdjVals[rec-1])
    return GAdj_Locs, GAdj_Mags_Accum, GAdj_Mags_Act, GAdj_WMOs, GAdj_WBANs
#******************************************************************
# GETHIST
def GetHist(BinSz, GAdj_Mags_Act):
    """Build a symmetric histogram of adjustment magnitudes plus summary stats.

    Inputs:
      BinSz         = float bin width
      GAdj_Mags_Act = float array/list of actual adjustment magnitudes
    Returns:
      GHistAdjMagsAct = float array of bin counts
      GXarr           = float array of bin midpoints
      GBinArr         = float array of bin edges (leftmost to rightmost)
      GMeanAdj        = float mean of all adjustment magnitudes
      GStdAdj         = float standard deviation of all adjustment magnitudes
      GMaxFreq        = maximum count found in any bin
    """
    # Set up the bins for the histogram, padding half a bin beyond the data
    minX = np.floor(np.min(GAdj_Mags_Act)) - (BinSz/2.)
    maxX = np.ceil(np.max(GAdj_Mags_Act)) + (BinSz/2.)
    # Make the bins symmetrical about zero
    if (abs(minX) > maxX):
        maxX = abs(minX)
    else:
        minX = -(maxX)
    # np.linspace is used rather than np.arange because arange is unreliable
    # with float steps; the bin count must be an int for modern numpy.
    nbins = int(np.round((maxX - minX) / BinSz))
    GBinArr = np.linspace(minX, maxX, nbins + 1)
    GXarr = GBinArr[0:-1] + (BinSz/2.)
    # np.histogram instead of plt.hist: same counts, but no figure is drawn
    # as a side effect.  Cast to float to match plt.hist's float output so
    # downstream float assignments (pseudomax) behave identically.
    GHistAdjMagsAct = np.histogram(GAdj_Mags_Act, bins=GBinArr)[0].astype(float)
    # Find the mean and standard deviation of adjustments
    GMeanAdj = np.mean(GAdj_Mags_Act)
    GStdAdj = np.std(GAdj_Mags_Act)
    # Find the max value in the histogram
    GMaxFreq = np.max(GHistAdjMagsAct)
    return GHistAdjMagsAct, GXarr, GBinArr, GMeanAdj, GStdAdj, GMaxFreq
#*****************************************************************
# GETNEWMIDDLE
def GetNewMiddle(GHistAdjMagsAct, GXarr, BinSz, GMeanAdj, GMaxFreq, GrowthFactorIn=None):
    """Find and fill a dip or plateau in the middle of the adjustment histogram.

    Depends on well-behaved histograms with no unusual peaks other than a
    possible missing middle.  Algorithm:
      1. Split the histogram below and at/above the mean.
      2. Find the local max of each half.
      3. Set any lower values after the low-half max to the histogram max
         (FixLow = True if any were found).
      4. Set any lower values before the high-half max to the histogram max
         (FixHigh = True), and set the first high-half point (closest to the
         mean) to growth_factor * max as a pseudomax for the Gaussian fit.
      5. If no pseudomax was set in the high half but the low half dipped,
         set the last low-half point to growth_factor * max instead.
      6. Merge the two halves and return.

    Inputs:
      GHistAdjMagsAct = float array histogram of adjustment magnitudes
      GXarr           = float array of bin midpoints
      BinSz           = float bin width
      GMeanAdj        = float mean of adjustment magnitudes
      GMaxFreq        = max histogram count
      GrowthFactorIn  = optional pseudomax multiplier; defaults to the
                        module-level GrowthFactor (backward compatible)
    Returns:
      GNewHistAdjMagsAct = histogram with the missing middle filled in
      FixLow, FixHigh    = True where a dip was found in that half
    """
    if GrowthFactorIn is None:
        GrowthFactorIn = GrowthFactor # module-level default (1.5)
    FixLow = False
    FixHigh = False
    # Search for max in everything below the mean
    LowHalf = GHistAdjMagsAct[np.where((GXarr + (BinSz / 2.)) < GMeanAdj)]
    LowHalfMax = np.max(LowHalf)
    GotMax = np.where(LowHalf == LowHalfMax)[0]
    # If the max is before the end of the array then set all after max to max
    if (GotMax[0] < (len(LowHalf)-1)):
        LowHalf[GotMax[0]+1:] = GMaxFreq
        FixLow = True
    # Search for max in everything at/above the mean
    HighHalf = GHistAdjMagsAct[np.where((GXarr + (BinSz / 2.)) >= GMeanAdj)]
    HighHalfMax = np.max(HighHalf)
    GotMax = np.where(HighHalf == HighHalfMax)[0]
    # If the max is after the beginning of the array then set all before max
    # to max and the first value (closest to mean) to the pseudomax
    if (GotMax[0] > 0):
        HighHalf[:GotMax[0]] = GMaxFreq # no +1: keep the local maximum itself
        HighHalf[0] = GMaxFreq * GrowthFactorIn
        FixHigh = True
    # If no pseudomax was set in the high half, but the low half dipped,
    # the pseudomax should go at the top of the low half instead
    elif (FixLow):
        LowHalf[-1] = GMaxFreq * GrowthFactorIn
    # Merge low and high halves to create the new histogram with completed middle
    GNewHistAdjMagsAct = np.append(LowHalf,HighHalf)
    # NOTE(review): a threshold might be needed to detect a genuine dip - if
    # just one or two values dip, is it really a missing middle?  For q IDPHA
    # only one value dipped but the fit was better with the pseudomax set.
    return GNewHistAdjMagsAct, FixLow, FixHigh
#***********************************************************
# OUTPUTSTATS
def OutPutStats(OUTLOG, OUTADJS, GAdj_Mags_Act, GDiffValsArr, GAdj_WMOs, GAdj_WBANs, GVar, HType, SCount):
    """Append adjustment statistics to OUTLOG and a sorted adjustment list to OUTADJS.

    Inputs:
      OUTLOG        = string filepath for the stats log (appended to)
      OUTADJS       = string filepath for the largest-adjustments list (appended to)
      GAdj_Mags_Act = float array/list of all actual adjustment magnitudes
      GDiffValsArr  = float array of missed adjustments (histogram - fit diffs)
      GAdj_WMOs     = WMO ID string for each entry of GAdj_Mags_Act
      GAdj_WBANs    = WBAN ID string for each entry of GAdj_Mags_Act
      GVar          = string variable name (e.g. 'q')
      HType         = string homogtype (e.g. 'PHA')
      SCount        = integer number of stations
    Returns:
      Nothing (side effect: both files are appended to).
    """
    # Sorted absolute adjustments for the ABS_* statistics
    Abs_Adj_Mags_Act = np.sort(abs(np.array(GAdj_Mags_Act)))
    # `with` guarantees the file is closed even if a write fails
    with open(OUTLOG,'a+') as filee:
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_MEAN=',np.mean(Abs_Adj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_MEDIAN=',np.median(Abs_Adj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_STD=',np.std(Abs_Adj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN=',np.mean(GAdj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEDIAN=',np.median(GAdj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_STD=',np.std(GAdj_Mags_Act)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN_GAUSSDIFFS=',np.mean(GDiffValsArr)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_STD_GAUSSDIFFS=',np.std(GDiffValsArr)))
        filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN_ADJ_NO=',len(GAdj_Mags_Act) / float(SCount)))
    # Sort the (signed) adjustments by absolute magnitude, largest first
    GAdj_Mags_Act = np.array(GAdj_Mags_Act)
    OrderAdj = np.flip(np.argsort(abs(GAdj_Mags_Act))) # indices, largest |adj| first
    Sorted_Adj_Mags_Act = GAdj_Mags_Act[OrderAdj]
    # Print out these in order, tagged with the owning station's IDs
    with open(OUTADJS,'a+') as filee:
        for i, AdjVal in enumerate(Sorted_Adj_Mags_Act):
            stradj = '% 7.2f' % AdjVal
            filee.write('%s%s%s%s\n' % (GAdj_WMOs[OrderAdj[i]],GAdj_WBANs[OrderAdj[i]],' ', stradj))
    return
#********************************************************************************
def PlotAdjs(OutFile, GHistAdjMagsAct, GGaussCurve, GMergeFit, GDiffArr, GDiffValsArr, GXarr, GAdj_Locs, MCount, StYr, EdYr, Unit):
    """Two-panel figure saved to OutFile.eps and OutFile.png.

    Panel a): histogram of adjustment magnitudes (black dots), the fitted
    Gaussian (grey), the merged fit (red) and the difference (dotted blue),
    annotated with the mean/std of the missed-adjustment differences.
    Panel b): time series of changepoint frequency per month.

    Inputs:
      OutFile         = output path without extension
      GHistAdjMagsAct = histogram counts of adjustment magnitudes
      GGaussCurve     = fitted Gaussian evaluated at GXarr
      GMergeFit       = element-wise max of histogram and Gaussian
      GDiffArr        = GMergeFit - GHistAdjMagsAct at each bin
      GDiffValsArr    = missed adjustments expanded to individual values
      GXarr           = bin midpoints (x axis of panel a)
      GAdj_Locs       = changepoint count per month (y axis of panel b)
      MCount          = number of months; StYr/EdYr = start/end year
      Unit            = axis-label unit string for the variable
    """
    # Set up for 2 panel plot
    plt.clf()
    f,axarr = plt.subplots(2,figsize=(7,10),sharex=False)
    # Panel a): the histogram and fits
    axarr[0].set_position([0.1,0.6,0.85,0.35])
    axarr[0].set_xlim([GXarr[0],GXarr[-1]])
    axarr[0].plot(GXarr,GHistAdjMagsAct,'.',c='black')
    axarr[0].plot(GXarr,GGaussCurve,c='lightgrey')
    axarr[0].plot(GXarr,GMergeFit,c='red')
    axarr[0].plot(GXarr,GDiffArr,':',c='blue')
    axarr[0].annotate('a)',xy=(0.03,0.9), xycoords='axes fraction',size=12)
    MnDfs = '%7.3f' % np.mean(GDiffValsArr)
    axarr[0].annotate('Mean of Diffs: '+MnDfs,xy=(0.03,0.8), xycoords='axes fraction',size=14)
    StdDfs = '%7.3f' % np.std(GDiffValsArr)
    axarr[0].annotate('Std of Diffs: '+StdDfs,xy=(0.03,0.7), xycoords='axes fraction',size=14)
    axarr[0].set_xlabel('Adjustment Magnitude '+Unit,size=16)
    axarr[0].set_ylabel('Frequency',size=16)
    # Panel b): build a list of month-start dates spanning the record
    TheMonths = []
    yr = int(StYr)
    mon = 1
    for m in range(MCount):
        TheMonths.append(dt.date(yr,mon,1))
        mon = mon+1
        if mon == 13:
            mon = 1
            yr = yr + 1
    axarr[1].set_position([0.1,0.1,0.85,0.35])
    axarr[1].set_xlim([TheMonths[0],TheMonths[-1]])
    axarr[1].plot(TheMonths,GAdj_Locs,c='black')
    axarr[1].annotate('b)',xy=(0.03,0.9), xycoords='axes fraction',size=12)
    axarr[1].set_xlabel('Time',size=16)
    axarr[1].set_ylabel('Frequency',size=16)
    plt.savefig(OutFile+".eps")
    plt.savefig(OutFile+".png")
    # Close the figure: without this, repeated calls leak open figures
    # (matplotlib keeps every figure alive until explicitly closed)
    plt.close(f)
    return #PlotHomogTS
#******************************************
# MAIN
#************************************************
def main(argv):
    """Parse --var, gather all homogenisation adjustments for that variable,
    fit a Gaussian to their distribution to estimate the missed-adjustment
    uncertainty, output stats (unless PlotOnly) and make the plots."""
    # INPUT PARAMETERS AS STRINGS!!!!
    var = 'q' # 'q','rh','e','td','tw','t','dpd'
    try:
        opts, args = getopt.getopt(argv, "hi:",
                                   ["var="])
    except getopt.GetoptError:
        print('Usage (as strings) F10_MissingAdjUnc_AdjPlots.py --var <q>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == "--var":
            try:
                var = arg
            except:
                sys.exit("Failed: var not a string")
    # HARDWIRE THIS IF YOU WANT TO ONLY LOOK AT PHA!!!
    homogtype = ParamDict[var][2] # THIS IS THE OPERATIONAL RUN (IDPHA, IDPHAMG, PHA, PHADPD)
    print('WORKING ON: ',var, homogtype)
    # pdb.set_trace()
    # Collect all of the adjustment information
    # Now set up files
    var2 = ParamDict[var][0] #'DPD','RH','Td','T','Tw','e','q
    INSTATLIST = INDIR+'goodforHadISDH.'+versiondots+'_'+homogtype+var+KL+'.txt'
    # INSTATLIST = INDIR+'goodforHadISDH.'+versiondots+'_'+homogtype+var+'_JAN2020'+KL+'.txt'
    # T IDPHA is Merged with PHA so homogtype is now IDPHAMG
    if (var == 't'):
        homogtype = homogtype+'MG'
    INADJLIST = INDIR+'HadISDH.land'+var2+'.'+versiondots+'_'+homogtype+'.log'
    OUTPLOTS = OUTPLOTDIR+'HadISDH.land'+var2+'.'+versiondots+'_adjspread_'+homogtype
    OUTADJS = INDIR+'Largest_Adjs_land'+var2+'.'+versiondots+'_'+homogtype+'.txt'
    # read in station list (fixed-width fields: WMO, WBAN, lat, lon, elev, country, name, extra, int)
    MyTypes = ["|U6","|U5","float","float","float","|U4","|U30","|U7","int"]
    #MyTypes = ("|S6","|S5","float","float","float","|S4","|S30","|S7","int")
    MyDelimiters = [6,5,8,10,7,4,30,7,5]
    RawData = ReadData(INSTATLIST,MyTypes,MyDelimiters)
    StationListWMO = np.array(RawData['f0'])
    StationListWBAN = np.array(RawData['f1'])
    StationListLat = np.array(RawData['f2'])
    StationListLon = np.array(RawData['f3'])
    StationListElev = np.array(RawData['f4'])
    StationListCID = np.array(RawData['f5'])
    StationListName = np.array(RawData['f6'])
    nstations = len(StationListWMO)
    # Set up arrays (styear/edyear may be strings from the config file, hence int())
    nyrs = (int(edyear)+1)-int(styear)
    nmons = nyrs*12
    # int_mons = indgen(nmons)
    # Read in adjustments into arrays
    Adj_Locs, Adj_Mags_Accum, Adj_Mags_Act, Adj_WMOs, Adj_WBANs = GetAdjustmentArrays(INADJLIST, nmons, nstations, homogtype, StationListWMO, StationListWBAN)
    # Calculate the required statistics
    # Very difficult to get a best fit gaussian to data with a missing middle as we have to assume something about the middle
    # Missing middle doesn't appear to be an issue for all - IDPHA for q and T appears to fill in, less so for RH
    # Get histogram of adjustments
    # Find middle - if there is a dip select all values between local peaks
    # Set these to the maximum of the other remaining values
    # Set the middle of the dip to GrowthFactor*max of remaining values to improve the amplitude gaussian fit.
    # Find the max of the full histogram and mean and standard deviation of all adjustments
    # Find the best fit curve for the nanned version - set amp=np.max, cen=mean(adj), wid=sd(adj), set bounds to max+max*GrowthFactor (20%), mean+-0.1,st+/-0.2
    # It will likely fit to given lower bound
    # Get the curve using gaussian model
    # CHECK THE CURVE!!!
    # This sounds like a fudge but I set the middle value in IDL to ~4000 so its the same thing.
    # Get the histogram of adjustments for dataset
    BinSize = ParamDict[var][4]
    HistAdjMagsAct, Xarr, BinArray, MeanAdj, StdAdj, MaxFreq = GetHist(BinSize, Adj_Mags_Act)
    # print('Hist stats: ',MaxFreq, MeanAdj, StdAdj)
    # Find out if there is a dip or plateau in the middle
    # This depends on well behaved histograms that don't have any unusual peaks other than a missing middle.
    NewHistAdjMagsAct, FixingLow, FixingHigh = GetNewMiddle(HistAdjMagsAct, Xarr, BinSize, MeanAdj, MaxFreq)
    # print('Did we need to fix middle? L? H?',FixingLow, FixingHigh)
    # If there is no dip or plateau just fit a gaussian anyway?
    # Get a best fit gaussian curve, with bounds if FixingLow or FixingHigh is True
    if (FixingLow) or (FixingHigh):
        # Add dip info to log file
        if (PlotOnly == False): # then we're getting all the stats
            filee = open(OUTPUTLOG,'a+')
            filee.write('%s%s%s%s\n' % (var,'_',homogtype,'_DIP_FOUND=True'))
            filee.close()
        # Bounded fit against the filled-in histogram, plus an unbounded fit
        # against the raw histogram for comparison
        bv, covar = curve_fit(Gaussian,Xarr,NewHistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj],bounds=([MaxFreq,MeanAdj-1.,StdAdj-1.],[MaxFreq*GrowthFactor,MeanAdj+1,StdAdj+1],))
        bvo, covaro = curve_fit(Gaussian,Xarr,HistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj])
        # print('Running with bounds because we found a dip')
        GaussCurve = Gaussian(Xarr,bv[0],bv[1],bv[2])
        GaussCurveOLD = Gaussian(Xarr,bvo[0],bvo[1],bvo[2])
    else:
        # Add dip info to log file
        if (PlotOnly == False): # then we're getting all the stats
            filee = open(OUTPUTLOG,'a+')
            filee.write('%s%s%s%s\n' % (var,'_',homogtype,'_DIP_FOUND=False'))
            filee.close()
        bv, covar = curve_fit(Gaussian,Xarr,NewHistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj])
        GaussCurve = Gaussian(Xarr,bv[0],bv[1],bv[2])
    # print('Check bv output for amp, mean, std and the best fit curve')
    # pdb.set_trace()
    # plt.clf()
    # plt.plot(Xarr,NewHistAdjMagsAct,'+')
    # plt.plot(Xarr,HistAdjMagsAct,'.')
    # plt.plot(Xarr,GaussCurve,'red')
    # if (FixingLow) or (FixingHigh):
    #     plt.plot(Xarr,GaussCurveOLD,'grey')
    # plt.show()
    # pdb.set_trace()
    # Merge the GaussCurve and HistAdjMagsAct to pick up the maximum values in each case - the fatter tails of HistAdjMagsAct and the Missing Middle?
    MergeFit = np.maximum(HistAdjMagsAct,GaussCurve)
    # Get the difference between the MergeFit and HistAdjMagsAct
    DiffArr = MergeFit - HistAdjMagsAct
    # print('Check what happens when HistAdjMagsAct = 0 and the DiffArr')
    # plt.clf()
    # plt.plot(Xarr,NewHistAdjMagsAct,'+')
    # plt.plot(Xarr,HistAdjMagsAct,'.')
    # plt.plot(Xarr,GaussCurve,'red')
    # plt.plot(Xarr,MergeFit,'blue')
    # plt.plot(Xarr,DiffArr,'orange')
    # plt.show()
    # pdb.set_trace()
    # Convert the differences into values using the BinArray locations and numbers in DiffArr
    # DiffArr should not be less than 0 in any place
    DiffValsArr=[]
    for i,Num in enumerate(DiffArr):
        print(i,Num)
        if (np.round(Num) > 0):
            # Repeat the bin midpoint once per missed adjustment in this bin
            DiffValsArr = np.append(DiffValsArr,np.repeat((BinArray[i] + (BinSize / 2.)),np.round(Num)))
    # print('Check setting diff vals')
    # pdb.set_trace()
    # Output these stats if PlotOnly = False
    if (PlotOnly == False):
        OutPutStats(OUTPUTLOG, OUTADJS, Adj_Mags_Act, DiffValsArr, Adj_WMOs, Adj_WBANs, var, homogtype, nstations)
    # Make the Plots
    PlotAdjs(OUTPLOTS, HistAdjMagsAct, GaussCurve, MergeFit, DiffArr, DiffValsArr, Xarr, Adj_Locs, nmons, styear, edyear, ParamDict[var][1])
# Finish
if __name__ == '__main__':
    main(sys.argv[1:])
| cc0-1.0 |
ngouzy/smartchangelog | tests/unit/test_commit.py | 2 | 2073 | from smartchangelog import datetools
from smartchangelog.commit import Commit
from smartchangelog.commitmsg import CommitType
from tests.unit import data_file_path
class TestCommit:
    """Unit tests for the Commit value object: git-log parsing, line
    stripping, and property-name reflection."""

    def test_parse(self):
        """Commit.parse should turn a raw git log entry into a Commit tuple."""
        # GIVEN: a captured `git log` entry stored as a test data file
        with open(data_file_path('one.gitlog'), encoding='utf-8') as log_file:
            log = log_file.read()
        expected = Commit(
            id='a6f79b56acbb9e58327ecf91feed611bb614927f',
            author='Nicolas Gouzy <nicolas.gouzy@orange.com>',
            date=datetools.str2date('2017-03-23 17:30:56 +0100'),
            type=CommitType.refactor,
            scope='changelog',
            subject='better model',
            body='NamedTuple rocks !'
        )
        # WHEN
        changelog_item = Commit.parse(log)
        # THEN
        assert changelog_item == expected

    def test_strip_lines(self):
        """Commit.strip_lines should strip indentation from each line while
        keeping blank separator lines intact."""
        # GIVEN
        # NOTE(review): the expected value below implies the source literal
        # contains blank separator lines between sentence groups - confirm
        # the literal against the repository copy if this ever fails.
        string = """
        Lorem ipsum dolor sit amet, consectetur adipiscing elit.
        Phasellus non erat imperdiet, pellentesque nibh et, porta velit.
        Fusce sit amet elit ac magna congue accumsan sed ut tellus.
        Nullam at velit tincidunt, sodales mi quis, gravida metus.
        Quisque pellentesque ipsum nec nunc vehicula tincidunt.
        """
        expected = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n" \
                   "Phasellus non erat imperdiet, pellentesque nibh et, porta velit.\n" \
                   "\n" \
                   "Fusce sit amet elit ac magna congue accumsan sed ut tellus.\n" \
                   "Nullam at velit tincidunt, sodales mi quis, gravida metus.\n" \
                   "\n" \
                   "\n" \
                   "Quisque pellentesque ipsum nec nunc vehicula tincidunt."
        # WHEN
        actual = Commit.strip_lines(string)
        # THEN
        assert actual == expected

    def test_property_name(self):
        """Commit.property_name should recover the attribute name of a field."""
        # GIVEN
        prop = Commit.author
        # WHEN
        property_name = Commit.property_name(prop)
        # THEN
        assert property_name == 'author'
victorbriz/omim | 3party/protobuf/python/google/protobuf/service.py | 243 | 9144 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
  """Raised when a blocking RPC method call fails."""
class Service(object):

  """Abstract base interface for protocol-buffer-based RPC services.

  Services themselves are abstract classes (implemented either by servers or as
  stubs), but they subclass this base interface. The methods of this
  interface can be used to call the methods of the service without knowing
  its exact type at compile time (analogous to the Message interface).
  """

  def GetDescriptor(self):
    """Retrieves this service's descriptor.

    Fixed: the original declaration omitted ``self``, so calling this method
    on an instance raised ``TypeError`` ("takes no arguments") instead of the
    intended ``NotImplementedError``. Subclasses still override this method,
    so the change is backward-compatible.
    """
    raise NotImplementedError

  def CallMethod(self, method_descriptor, rpc_controller,
                 request, done):
    """Calls a method of the service specified by method_descriptor.

    If "done" is None then the call is blocking and the response
    message will be returned directly. Otherwise the call is asynchronous
    and "done" will later be called with the response value.

    In the blocking case, RpcException will be raised on error.

    Preconditions:
    * method_descriptor.service == GetDescriptor
    * request is of the exact same class as returned by
      GetRequestClass(method).
    * After the call has started, the request must not be modified.
    * "rpc_controller" is of the correct type for the RPC implementation
      being used by this Service. For stubs, the "correct type" depends
      on the RpcChannel which the stub is using.

    Postconditions:
    * "done" will be called when the method is complete. This may be
      before CallMethod() returns or it may be at some point in the future.
    * If the RPC failed, the response value passed to "done" will be None.
      Further details about the failure can be found by querying the
      RpcController.
    """
    raise NotImplementedError

  def GetRequestClass(self, method_descriptor):
    """Returns the class of the request message for the specified method.

    CallMethod() requires that the request is of a particular subclass of
    Message. GetRequestClass() gets the default instance of this required
    type.

    Example:
      method = service.GetDescriptor().FindMethodByName("Foo")
      request = stub.GetRequestClass(method)()
      request.ParseFromString(input)
      service.CallMethod(method, request, callback)
    """
    raise NotImplementedError

  def GetResponseClass(self, method_descriptor):
    """Returns the class of the response message for the specified method.

    This method isn't really needed, as the RpcChannel's CallMethod constructs
    the response protocol message. It's provided anyway in case it is useful
    for the caller to know the response type in advance.
    """
    raise NotImplementedError
class RpcController(object):

  """Mediates a single RPC method call.

  The controller's job is twofold: it lets callers tweak settings specific to
  the underlying RPC implementation, and it reports RPC-level errors. The
  interface below is a least-common-denominator feature set that every
  implementation is expected to support; concrete implementations may add
  richer features (deadline propagation, tracing, ...) on top.
  """

  # ----- Client-side API -----

  def Reset(self):
    """Returns the controller to its initial state so it can be reused.

    Must not be invoked while an RPC is still in flight.
    """
    raise NotImplementedError

  def Failed(self):
    """True when the finished call ended in failure.

    Only meaningful once the call has completed; the set of possible failure
    reasons depends on the RPC implementation. When this returns true the
    contents of the response message are undefined.
    """
    raise NotImplementedError

  def ErrorText(self):
    """Human-readable description of the error (valid when Failed() is true)."""
    raise NotImplementedError

  def StartCancel(self):
    """Asks the RPC system to cancel the call.

    Cancellation is best-effort: the system may cancel immediately, later, or
    not at all. If the call is cancelled, the "done" callback still fires and
    this controller reports the call as failed at that point.
    """
    raise NotImplementedError

  # ----- Server-side API -----

  def SetFailed(self, reason):
    """Records a failure with the given reason string.

    Makes Failed() return true on the client side and folds "reason" into
    ErrorText(). Machine-readable failure details belong in the response
    protocol buffer instead — do not encode them via SetFailed().
    """
    raise NotImplementedError

  def IsCanceled(self):
    """True when the client has cancelled the RPC.

    A server seeing true here may stop producing a reply, but must still
    invoke the final "done" callback.
    """
    raise NotImplementedError

  def NotifyOnCancel(self, callback):
    """Registers "callback" to run once when the RPC is cancelled.

    The callback always fires exactly once: on cancellation, on normal
    completion, or immediately if the RPC was already cancelled when this
    method is called. May be called at most once per request.
    """
    raise NotImplementedError
class RpcChannel(object):

  """Abstract transport used to reach a (possibly remote) service.

  An RpcChannel is the communication line over which a service's methods are
  invoked. Code normally does not talk to a channel directly; instead it
  wraps the channel in a generated stub:

    channel = rpcImpl.Channel("remotehost.example.com:1234")
    controller = rpcImpl.Controller()
    service = MyService_Stub(channel)
    service.MyMethod(controller, request, callback)
  """

  def CallMethod(self, method_descriptor, rpc_controller,
                 request, response_class, done):
    """Invokes the remote method identified by method_descriptor.

    The signature mirrors Service.CallMethod(), but with one relaxed
    requirement: the request object need not belong to a specific class, as
    long as its descriptor equals method_descriptor.input_type.
    """
    raise NotImplementedError
| apache-2.0 |
Ictp/indico | indico/MaKaC/badge.py | 1 | 21768 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
import os
from persistent import Persistent
import tempfile
from indico.util.json import loads
from MaKaC.common.Counter import Counter
from indico.core.config import Config
class BadgeTemplateManager(Persistent):
    """Container for the badge templates of a single conference.

    The conference handed to the constructor is the owner of this manager.
    Templates are kept in a dictionary keyed by template id; ids are produced
    by an internal counter.
    """

    def __init__(self, conf):
        """Creates an empty manager owned by the conference ``conf``."""
        self.__conf = conf
        self.__templates = {}
        self.__counter = Counter(1)
        self.__tempBackgrounds = {}
        self.__tempBackgroundCounters = {}
        self._PDFOptions = BadgePDFOptions(conf)

    def notifyModification(self):
        """Flags this persistent object as changed so ZODB stores it."""
        self._p_changed = 1

    def getTemplateById(self, templateId):
        """Returns the BadgeTemplate registered under ``templateId``."""
        return self.__templates[templateId]

    def getTemplateData(self, templateId):
        """Shortcut returning the raw data list of template ``templateId``."""
        return self.__templates[templateId].getData()

    def getTemplates(self):
        """Returns the dict mapping template ids to BadgeTemplate objects."""
        return self.__templates

    def hasTemplate(self, templateId):
        """True when a template is stored under ``templateId``."""
        return templateId in self.__templates

    def getNewTemplateId(self):
        """Returns a fresh, unused template id (advances the counter)."""
        return self.__counter.newCount()

    def storeTemplate(self, templateId, templateData):
        """Creates or updates a template from the designer's JSON string.

        ``templateData`` is the JSON produced by the save() javascript of
        WConfModifBadgeDesign.tpl: a list holding the template name, a
        width/height dict (pixels), the pixels-per-cm ratio, and the list of
        item dicts. When an existing template is updated, its temporary
        backgrounds are archived.
        """
        if templateId in self.__templates:
            existing = self.__templates[templateId]
            existing.setData(loads(templateData))
            existing.archiveTempBackgrounds(self.__conf)
        else:
            self.__templates[templateId] = BadgeTemplate(templateId, loads(templateData))
        self.notifyModification()

    def addTemplate(self, templ, templateId):
        """Registers an already-built template; returns it, or None if the id is taken."""
        if templateId in self.__templates:
            return None
        self.__templates[templateId] = templ
        return templ

    def deleteTemplate(self, templateId):
        """Deletes a template and its backgrounds; no-op for unknown ids."""
        if templateId not in self.__templates:
            return
        self.__templates[templateId].deleteBackgrounds()
        del self.__templates[templateId]
        self.notifyModification()

    def copyTemplate(self, templateId):
        """Clones an existing template, appending " (copy)" to the clone's name."""
        if templateId not in self.__templates:
            return
        clonedTempl = self.getTemplateById(templateId).clone(self)
        clonedData = clonedTempl.getData()
        clonedData[0] += " (copy)"
        clonedTempl.setData(clonedData)

    def getPDFOptions(self):
        """Returns the PDF options, creating them lazily for legacy objects."""
        if not hasattr(self, "_PDFOptions"):
            self._PDFOptions = BadgePDFOptions(self.__conf)
        return self._PDFOptions

    def getOwner(self):
        """Returns the conference that owns this manager."""
        return self.__conf
def getNewTempFile():
    """Creates an empty temporary file in Indico's upload temp dir and returns its path.

    Fixed: tempfile.mkstemp() returns an already-open OS-level file descriptor
    together with the path; the previous code kept only the path and leaked
    one descriptor per call. The descriptor is now closed explicitly —
    callers reopen the file by path.
    """
    cfg = Config.getInstance()
    tempPath = cfg.getUploadedFilesTempDir()
    fd, tempFileName = tempfile.mkstemp(suffix="IndicoBadge.tmp", dir=tempPath)
    os.close(fd)
    return tempFileName
def saveFileToTemp(fd):
    """Copies the content of the file-like object ``fd`` into a new temp file.

    Returns the path of the temporary file (created via getNewTempFile()).
    Fixed: the output file is now opened in a ``with`` block so it is closed
    even when fd.read() or the write raises.
    """
    fileName = getNewTempFile()
    with open(fileName, "wb") as out:
        out.write(fd.read())
    return fileName
class BadgeTemplate (Persistent):
    """ This class represents a badge template, which will be used to print
    badges.

    Its data (see setData()) is a list composed of:
        - the name of the template,
        - a dictionary with 2 keys: width and height of the template, in pixels,
        - the pixels-per-cm ratio (defined in ConfModifBadgeDesign.tpl,
          currently 50),
        - the index of the background used among this template's backgrounds
          (-1 if none),
        - a list of dictionaries, one per item of the template.

    IMPORTANT NOTE: loads() builds an object with unicode objects inside; when
    these are concatenated to str objects (e.g. in an HTML template) this can
    fail, so __cleanData() converts them to utf-8 str via
    MaKaC.services.interface.rpc.json.unicodeToUtf8.
    """

    def __init__(self, id, templateData):
        """ Class constructor.
        templateData is the decoded (loads()) form of the JSON string used in
        BadgeTemplateManager.storeTemplate().
        """
        self.__id = id
        self.__templateData = templateData
        self.__cleanData()
        # Shared id counter for both archived and temporary backgrounds.
        self.__backgroundCounter = Counter()
        # Archived backgrounds (key: id, value: LocalFile object).
        self.__backgrounds = {}
        # Temporary, not yet archived backgrounds (key: id, value: filepath string).
        self.__tempBackgroundsFilePaths = {}
        self.notifyModification()

    def clone(self, templMan, templId=None):
        """ Returns a copy of this template registered in templMan.
        A fresh id is allocated when templId is None. The currently used
        background, if any, is duplicated into the clone and archived there.
        """
        if templId is None:
            templId = templMan.getNewTemplateId()
        templData = self.getData()[:]
        templData[3] = -1
        newTempl = BadgeTemplate(templId, templData)
        templMan.addTemplate(newTempl, templId)
        if self.getBackground(self.getUsedBackgroundId())[1] is not None:
            templData[3] = 0
            fpath = self.getBackground(self.getUsedBackgroundId())[1].getFilePath()
            # Fixed: backgrounds are image files, so copy them in binary mode
            # ("rb" instead of "r") and close the source file deterministically.
            with open(fpath, "rb") as srcFile:
                newPath = saveFileToTemp(srcFile)
            newTempl.addTempBackgroundFilePath(newPath)
            newTempl.archiveTempBackgrounds(templMan.getOwner())
        templMan.notifyModification()
        return newTempl

    def notifyModification(self):
        """ Flags this persistent object as changed so ZODB stores it. """
        self._p_changed = 1

    def getData(self):
        """ Returns the list with all the information of the template.
        Useful so that javascript can analyze it on its own.
        """
        # Ensure that each item has a 'key' (in order to avoid using the
        # name as key).
        for item in self.__templateData[4]:
            if not "key" in item:
                item['key'] = item['name']
        return self.__templateData

    def setData(self, templateData):
        """ Sets the data of the template. """
        self.__templateData = templateData
        self.__cleanData()
        self.notifyModification()

    def getName(self):
        """ Returns the name of the template as a utf-8 encoded str. """
        return self.__templateData[0].encode('utf-8')

    def getWidth(self):
        """ Returns the width of the template, in pixels. """
        return self.__templateData[1]["width"]

    def getHeight(self):
        """ Returns the height of the template, in pixels. """
        return self.__templateData[1]["height"]

    def getPixelsPerCm(self):
        """ Returns the pixels / cm ratio of the template.
        This ratio is defined in the HTML template; right now its value
        should be 50.
        """
        return self.__templateData[2]

    def getItems(self):
        """ Returns a list of BadgeTemplateItem objects wrapping all the
        items of this template.
        """
        return [BadgeTemplateItem(itemData, self) for itemData in self.__templateData[4]]

    def getItem(self, name):
        """ Returns a BadgeTemplateItem for the item whose name is ``name``.

        Fixed: BadgeTemplateItem requires the owning template as its second
        constructor argument — the previous code omitted it, so every call
        raised TypeError. Raises IndexError when no item matches ``name``.
        """
        itemData = [item for item in self.__templateData[4] if item['name'] == name][0]
        return BadgeTemplateItem(itemData, self)

    def pixelsToCm(self, length):
        """ Transforms a length in pixels into a length in cm, using the
        template's pixels-per-cm ratio.
        """
        return float(length) / self.__templateData[2]

    def getWidthInCm(self):
        """ Returns the width of the template in cm. """
        return self.pixelsToCm(self.__templateData[1]["width"])

    def getHeightInCm(self):
        """ Returns the height of the template in cm. """
        return self.pixelsToCm(self.__templateData[1]["height"])

    def getAllBackgrounds(self):
        """ Returns the dict of archived backgrounds (id -> LocalFile). """
        return self.__backgrounds

    def getUsedBackgroundId(self):
        """ Returns the id of the currently used background.
        This id corresponds to a stored, archived background.
        """
        return int(self.__templateData[3])

    def getBackground(self, backgroundId):
        """ Returns a (flag, background) tuple for the given id:
        - (True, LocalFile) when the background has been archived,
        - (False, filepath string) when it is still temporary,
        - (None, None) when no background has that id.
        """
        if backgroundId in self.__backgrounds:
            return True, self.__backgrounds[backgroundId]
        if backgroundId in self.__tempBackgroundsFilePaths:
            return False, self.__tempBackgroundsFilePaths[backgroundId]
        return None, None

    def addTempBackgroundFilePath(self, filePath):
        """ Registers the filepath of a temporary background and returns
        the id allocated to it.
        """
        backgroundId = int(self.__backgroundCounter.newCount())
        self.__tempBackgroundsFilePaths[backgroundId] = filePath
        self.notifyModification()
        return backgroundId

    def archiveTempBackgrounds(self, conf):
        """ Archives all the temporary backgrounds of this template.

        Each temporary background (a filepath string in
        __tempBackgroundsFilePaths) is turned into an archived LocalFile in
        __backgrounds. Ids are preserved since both dictionaries share the
        same counter. Afterwards the temporary dictionary is reset to {}.
        """
        # Loop-invariant lookups hoisted out of the loop.
        from MaKaC.conference import LocalFile
        cfg = Config.getInstance()
        tempPath = cfg.getUploadedFilesSharedTempDir()
        for backgroundId, filePath in self.__tempBackgroundsFilePaths.items():
            filePath = os.path.join(tempPath, filePath)
            fileName = "background" + str(backgroundId) + "_t" + self.__id + "_c" + conf.id
            # Renamed local (was "file", shadowing the builtin).
            archivedFile = LocalFile()
            archivedFile.setName( fileName )
            archivedFile.setDescription( "Background " + str(backgroundId) + " of the template " + self.__id + " of the conference " + conf.id )
            archivedFile.setFileName( fileName )
            archivedFile.setFilePath( filePath )
            archivedFile.setOwner( conf )
            archivedFile.setId( fileName )
            archivedFile.archive( conf._getRepository() )
            self.__backgrounds[backgroundId] = archivedFile
        self.__tempBackgroundsFilePaths = {}
        self.notifyModification()

    def deleteTempBackgrounds(self):
        """ Deletes all the temporary backgrounds of this template. """
        self.__tempBackgroundsFilePaths = {}

    def deleteBackgrounds(self):
        """ Deletes all of the template's archived backgrounds.
        To be used when a template is deleted.
        """
        for localFile in self.__backgrounds.values():
            localFile.delete()

    def __cleanData(self):
        """ Private method cleaning the list sent by the javascript in
        WConfModifBadgeDesign.tpl so that it can be used safely:
        - items erased in the designer arrive as the value False and are
          dropped,
        - moved items arrive with CSS coordinates like 'x': '124px'; the
          trailing 'px' is stripped.
        """
        self.__templateData[4] = [item for item in self.__templateData[4] if item != False]
        from MaKaC.services.interface.rpc.json import unicodeToUtf8
        unicodeToUtf8(self.__templateData)
        for item in self.__templateData[4]:
            if isinstance(item['x'], basestring) and item['x'][-2:] == 'px':
                item['x'] = item['x'][0:-2]
            if isinstance(item['y'], basestring) and item['y'][-2:] == 'px':
                item['y'] = item['y'][0:-2]
class BadgeTemplateItem:
    """Read-only convenience wrapper around one item of a badge template.

    Instances are built on the fly from the raw item dictionary; they are
    never stored in the database.
    """

    def __init__(self, itemData, badgeTemplate):
        """``itemData`` is the raw attribute dict of the item, e.g.::

            {'fontFamilyIndex': 0, 'styleIndex': 1, 'bold': True,
             'key': 'Country', 'fontFamily': 'Arial', 'color': 'blue',
             'selected': False, 'fontSizeIndex': 5, 'id': 0, 'width': 250,
             'italic': False, 'fontSize': 'x-large', 'textAlignIndex': 1,
             'y': 40, 'x': 210, 'textAlign': 'Right', 'colorIndex': 2}

        The various '...Index' entries and 'selected' are client-side only.
        ``badgeTemplate`` is the template owning this item; it provides the
        pixel/cm conversion used by the *InCm accessors.

        TODO: fontFamilyIndex is kept in sync with fontFamily client-side
        only; changing fonts server-side would desynchronise the indexes.
        """
        self.__data = itemData
        self.__template = badgeTemplate

    def getKey(self):
        """Kind of item ("Name", "Country", "Fixed Text", ...); falls back to
        the legacy 'name' entry when no 'key' is present."""
        if "key" in self.__data:
            return self.__data['key']
        return self.__data['name']

    def getFixedText(self):
        """Text content — only meaningful for "Fixed Text" items."""
        return self.__data['text']

    def getX(self):
        """Horizontal position of the item, in pixels."""
        return self.__data['x']

    def getXInCm(self):
        """Horizontal position of the item, in cm."""
        return self.__template.pixelsToCm(self.getX())

    def getY(self):
        """Vertical position of the item, in pixels."""
        return self.__data['y']

    def getYInCm(self):
        """Vertical position of the item, in cm."""
        return self.__template.pixelsToCm(self.getY())

    def getFont(self):
        """Name of the font family used by this item."""
        return self.__data['fontFamily']

    def getFontSize(self):
        """Font size keyword: one of 'xx-small' ... 'xx-large'
        (the 7 HTML sizes)."""
        return self.__data['fontSize']

    def getColor(self):
        """Colour of the item's text, as a string."""
        return self.__data['color']

    def getWidth(self):
        """Width of the item, in pixels."""
        return self.__data['width']

    def getWidthInCm(self):
        """Width of the item, in cm."""
        return self.__template.pixelsToCm(self.getWidth())

    def isBold(self):
        """Whether the item is rendered in bold."""
        return self.__data['bold']

    def isItalic(self):
        """Whether the item is rendered in italic."""
        return self.__data['italic']

    def getTextAlign(self):
        """Text alignment: 'Left', 'Right', 'Center' or 'Justified'."""
        return self.__data['textAlign']
class BadgePDFOptions(Persistent):
    """Per-conference PDF layout options for badge printing.

    Holds the page margins, the spacing between badges, the page size and
    whether cutting guides (dashed rectangles) are drawn around each badge.
    All lengths are in cm; the defaults are the ones used by CERN's printing
    service.
    """

    def __init__(self, conference):
        if conference.getId() == "default":
            # The "default" conference seeds the system-wide defaults
            # (best values for the CERN printing service).
            self.__topMargin = 1.6
            self.__bottomMargin = 1.1
            self.__leftMargin = 1.6
            self.__rightMargin = 1.4
            self.__marginColumns = 1.0
            self.__marginRows = 0.0
            self._pageSize = "A4"
            self._drawDashedRectangles = True
        else:
            # Any other conference copies its initial options from the
            # "default" conference.
            from MaKaC.conference import CategoryManager
            defaults = (CategoryManager().getDefaultConference()
                        .getBadgeTemplateManager().getPDFOptions())
            self.__topMargin = defaults.getTopMargin()
            self.__bottomMargin = defaults.getBottomMargin()
            self.__leftMargin = defaults.getLeftMargin()
            self.__rightMargin = defaults.getRightMargin()
            self.__marginColumns = defaults.getMarginColumns()
            self.__marginRows = defaults.getMarginRows()
            self._pageSize = defaults.getPagesize()
            self._drawDashedRectangles = defaults.getDrawDashedRectangles()

    def getTopMargin(self):
        return self.__topMargin

    def getBottomMargin(self):
        return self.__bottomMargin

    def getLeftMargin(self):
        return self.__leftMargin

    def getRightMargin(self):
        return self.__rightMargin

    def getMarginColumns(self):
        return self.__marginColumns

    def getMarginRows(self):
        return self.__marginRows

    def getPagesize(self):
        # Lazy default for objects persisted before this attribute existed.
        if not hasattr(self, "_pageSize"):
            self._pageSize = "A4"
        return self._pageSize

    def getDrawDashedRectangles(self):
        """Returns a Boolean: whether a dashed rectangle should be drawn
        around each badge."""
        # Lazy default for objects persisted before this attribute existed.
        if not hasattr(self, "_drawDashedRectangles"):
            self._drawDashedRectangles = True
        return self._drawDashedRectangles

    def setTopMargin(self, value):
        self.__topMargin = value

    def setBottomMargin(self, value):
        self.__bottomMargin = value

    def setLeftMargin(self, value):
        self.__leftMargin = value

    def setRightMargin(self, value):
        self.__rightMargin = value

    def setMarginColumns(self, value):
        self.__marginColumns = value

    def setMarginRows(self, value):
        self.__marginRows = value

    def setPagesize(self, value):
        self._pageSize = value

    def setDrawDashedRectangles(self, value):
        """Sets whether a dashed rectangle is drawn around each badge.
        ``value`` must be a Boolean."""
        self._drawDashedRectangles = value
| gpl-3.0 |
rogerthat-platform/rogerthat-ios-client | 3rdParty/Code/3rdParty/zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/__init__.py | 34 | 14714 | """SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 5023 2010/06/14 22:05:46 scons"
import re
import SCons.Node.FS
import SCons.Util
class _Null(object):
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw)
class FindPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
class Base(object):
    """
    The base class for dependency scanners. This implements
    straightforward, single-pass scanning of a single file.
    """
    def __init__(self,
                 function,
                 name = "NONE",
                 argument = _null,
                 skeys = _null,
                 path_function = None,
                 # Node.FS.Base so that, by default, it's okay for a
                 # scanner to return a Dir, File or Entry.
                 node_class = SCons.Node.FS.Base,
                 node_factory = None,
                 scan_check = None,
                 recursive = None):
        """
        Construct a new scanner object given a scanner function.
        'function' - a scanner function taking two or three
        arguments and returning a list of strings.
        'name' - a name for identifying this scanner object.
        'argument' - an optional argument that, if specified, will be
        passed to both the scanner function and the path_function.
        'skeys' - an optional list argument that can be used to determine
        which scanner should be used for a given Node. In the case of File
        nodes, for example, the 'skeys' would be file suffixes.
        'path_function' - a function that takes four or five arguments
        (a construction environment, Node for the directory containing
        the SConscript file that defined the primary target, list of
        target nodes, list of source nodes, and optional argument for
        this instance) and returns a tuple of the directories that can
        be searched for implicit dependency files. May also return a
        callable() which is called with no args and returns the tuple
        (supporting Bindable class).
        'node_class' - the class of Nodes which this scan will return.
        If node_class is None, then this scanner will not enforce any
        Node conversion and will return the raw results from the
        underlying scanner function.
        'node_factory' - the factory function to be called to translate
        the raw results returned by the scanner function into the
        expected node_class objects.
        'scan_check' - a function to be called to first check whether
        this node really needs to be scanned.
        'recursive' - specifies that this scanner should be invoked
        recursively on all of the implicit dependencies it returns
        (the canonical example being #include lines in C source files).
        May be a callable, which will be called to filter the list
        of nodes found to select a subset for recursive scanning
        (the canonical example being only recursively scanning
        subdirectories within a directory).
        The scanner function's first argument will be a Node that should
        be scanned for dependencies, the second argument will be an
        Environment object, the third argument will be the tuple of paths
        returned by the path_function, and the fourth argument will be
        the value passed into 'argument', and the returned list should
        contain the Nodes for all the direct dependencies of the file.
        Examples:
        s = Scanner(my_scanner_function)
        s = Scanner(function = my_scanner_function)
        s = Scanner(function = my_scanner_function, argument = 'foo')
        """
        # Note: this class could easily work with scanner functions that take
        # something other than a filename as an argument (e.g. a database
        # node) and a dependencies list that aren't file names. All that
        # would need to be changed is the documentation.
        self.function = function
        self.path_function = path_function
        self.name = name
        self.argument = argument
        # Default the scanner keys: a suffix->scanner dict supplies its own
        # keys; a plain scanner function gets an empty key list.
        if skeys is _null:
            if SCons.Util.is_Dict(function):
                skeys = list(function.keys())
            else:
                skeys = []
        self.skeys = skeys
        self.node_class = node_class
        self.node_factory = node_factory
        self.scan_check = scan_check
        # 'recursive' may be a filter callable, a true value (recurse into
        # every returned node) or false/None (no recursion at all).
        if callable(recursive):
            self.recurse_nodes = recursive
        elif recursive:
            self.recurse_nodes = self._recurse_all_nodes
        else:
            self.recurse_nodes = self._recurse_no_nodes
    def path(self, env, dir=None, target=None, source=None):
        # Compute the search-path tuple via path_function, passing the
        # configured extra 'argument' only when one was supplied.
        if not self.path_function:
            return ()
        if not self.argument is _null:
            return self.path_function(env, dir, target, source, self.argument)
        else:
            return self.path_function(env, dir, target, source)
    def __call__(self, node, env, path = ()):
        """
        This method scans a single object. 'node' is the node
        that will be passed to the scanner function, and 'env' is the
        environment that will be passed to the scanner function. A list of
        direct dependency nodes for the specified node will be returned.
        """
        if self.scan_check and not self.scan_check(node, env):
            return []
        # select() may dispatch to a more specific scanner (dict case);
        # rebinding 'self' keeps the rest of the method generic.
        self = self.select(node)
        # NOTE(review): 'list' shadows the builtin here; it holds the raw
        # scanner-function results (typically strings).
        if not self.argument is _null:
            list = self.function(node, env, path, self.argument)
        else:
            list = self.function(node, env, path)
        kw = {}
        if hasattr(node, 'dir'):
            kw['directory'] = node.dir
        node_factory = env.get_factory(self.node_factory)
        nodes = []
        for l in list:
            # Convert raw results into Nodes unless they already are of
            # the expected node_class (or conversion is disabled).
            if self.node_class and not isinstance(l, self.node_class):
                l = node_factory(l, **kw)
            nodes.append(l)
        return nodes
    def __cmp__(self, other):
        # Python 2-only comparison protocol; compares instance state.
        try:
            return cmp(self.__dict__, other.__dict__)
        except AttributeError:
            # other probably doesn't have a __dict__
            return cmp(self.__dict__, other)
    def __hash__(self):
        # Identity-based hash; instances are mutable, so state-based
        # hashing would be unsafe.
        return id(self)
    def __str__(self):
        return self.name
    def add_skey(self, skey):
        """Add a skey to the list of skeys"""
        self.skeys.append(skey)
    def get_skeys(self, env=None):
        # skeys may be a single construction-variable string that needs
        # substitution against the environment before use.
        if env and SCons.Util.is_String(self.skeys):
            return env.subst_list(self.skeys)[0]
        return self.skeys
    def select(self, node):
        # A dict-valued 'function' maps scanner keys (suffixes) to more
        # specific scanners; otherwise this scanner handles everything.
        if SCons.Util.is_Dict(self.function):
            key = node.scanner_key()
            try:
                return self.function[key]
            except KeyError:
                return None
        else:
            return self
    def _recurse_all_nodes(self, nodes):
        return nodes
    def _recurse_no_nodes(self, nodes):
        return []
    # Class-level default; __init__ rebinds this per instance based on
    # the 'recursive' argument.
    recurse_nodes = _recurse_no_nodes
    def add_scanner(self, skey, scanner):
        # Only meaningful when self.function is a suffix->scanner dict.
        self.function[skey] = scanner
        self.add_skey(skey)
class Selector(Base):
    """Dispatching scanner: picks a more specific scanner for a Node
    based on the Node's scanner_key() (its suffix).

    TODO: The same dispatch now lives inside the Base class, so this
    class will be deprecated at some point. It was never exposed
    directly as part of the public interface, but the Scanner() factory
    function returns it, and custom modules patterned on our Tool
    modules may therefore depend on it.
    """
    def __init__(self, dict, *args, **kw):
        # A Selector has no scan function of its own; it only dispatches.
        Base.__init__(self, None, *args, **kw)
        self.dict = dict
        self.skeys = list(dict.keys())
    def __call__(self, node, env, path = ()):
        # Delegate the actual scan to whichever scanner matches the node.
        return self.select(node)(node, env, path)
    def select(self, node):
        # .get() yields None for an unknown key, matching the previous
        # try/except KeyError behavior.
        return self.dict.get(node.scanner_key())
    def add_scanner(self, skey, scanner):
        self.dict[skey] = scanner
        self.add_skey(skey)
class Current(Base):
    """Scanner that only scans nodes which are source files (have no
    builder) or derived files that are already current — which implies
    they exist, either locally or in a repository."""
    def __init__(self, *args, **kw):
        def _is_scannable(node, env):
            # Source files always qualify; derived files only when current.
            return not node.has_builder() or node.is_up_to_date()
        kw['scan_check'] = _is_scannable
        Base.__init__(self, *args, **kw)
class Classic(Current):
    """
    A Scanner subclass to contain the common logic for classic CPP-style
    include scanning, but which can be customized to use different
    regular expressions to find the includes.
    Note that in order for this to work "out of the box" (without
    overriding the find_include() and sort_key() methods), the regular
    expression passed to the constructor must return the name of the
    include file in group 0.
    """
    def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
        """Set up a recursive include scanner.
        'name' - identifying name for the scanner; 'suffixes' - file
        suffixes it handles; 'path_variable' - construction variable
        (e.g. 'CPPPATH') holding the search path; 'regex' - pattern
        whose match identifies an included file.
        """
        # Compile once up front; findall() runs on every scanned file.
        self.cre = re.compile(regex, re.M)
        def _scan(node, env, path=(), self=self):
            # Prefer the repository-side file when the local one is absent.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, path)
        kw['function'] = _scan
        kw['path_function'] = FindPathDirs(path_variable)
        kw['recursive'] = 1
        kw['skeys'] = suffixes
        kw['name'] = name
        Current.__init__(self, *args, **kw)
    def find_include(self, include, source_dir, path):
        """Look up 'include' in the including file's directory first,
        then along the configured search path. Returns a tuple of
        (node-or-None, include-name-as-written)."""
        n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
        return n, include
    def sort_key(self, include):
        return SCons.Node.FS._my_normcase(include)
    def find_include_names(self, node):
        # Fixed stray space before the argument list in the old call site.
        return self.cre.findall(node.get_text_contents())
    def scan(self, node, path=()):
        # Cache the includes list in node so we only scan it once.
        if node.includes is not None:
            includes = node.includes
        else:
            includes = self.find_include_names(node)
            # Intern the names of the include files. Saves some memory
            # if the same header is included many times.
            node.includes = list(map(SCons.Util.silent_intern, includes))
        # This is a hand-coded DSU (decorate-sort-undecorate, or
        # Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the #include line (including the
        # " or <, since that may affect what file is found), which lets
        # us keep the sort order constant regardless of whether the file
        # is actually found in a Repository or locally.
        nodes = []
        source_dir = node.get_dir()
        if callable(path):
            # path may be a deferred (Bindable) callable; resolve it now.
            path = path()
        for include in includes:
            n, i = self.find_include(include, source_dir, path)
            if n is None:
                # Requires SCons.Warnings, now imported at module level
                # (previously this relied on a transitive import).
                SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                    "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
            else:
                nodes.append((self.sort_key(include), n))
        return [pair[1] for pair in sorted(nodes)]
class ClassicCPP(Classic):
    """
    A Classic scanner that honors the bracketing style of an include
    when searching for it: a "quoted" include is looked up in the
    including file's directory first, an <angle-bracketed> one along
    the search path first, per classic CPP rules.
    For this to work, the regular expression passed to the constructor
    must capture the leading bracket character in group 0 and the
    contained filename in group 1.
    """
    def find_include(self, include, source_dir, path):
        # 'include' is a (bracket, filename) tuple from the regex groups.
        if include[0] == '"':
            search_dirs = (source_dir,) + tuple(path)
        else:
            search_dirs = tuple(path) + (source_dir,)
        found = SCons.Node.FS.find_file(include[1], search_dirs)
        return found, SCons.Util.silent_intern(include[1])
    def sort_key(self, include):
        # Keep the bracket in the key so ordering reflects exactly how
        # the include was written.
        return SCons.Node.FS._my_normcase(' '.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rjschwei/azure-sdk-for-python | azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/namespace_resource.py | 1 | 4782 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
# NOTE: this class is generated by AutoRest (see file header); hand edits
# will be lost when the code is regenerated.
class NamespaceResource(Resource):
    """Description of a Namespace resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param sku: The sku of the created namespace
    :type sku: :class:`Sku <azure.mgmt.notificationhubs.models.Sku>`
    :param namespace_resource_name: The name of the namespace.
    :type namespace_resource_name: str
    :param provisioning_state: Provisioning state of the Namespace.
    :type provisioning_state: str
    :param region: Specifies the targeted region in which the namespace
     should be created. It can be any of the following values: Australia
     EastAustralia SoutheastCentral USEast USEast US 2West USNorth Central
     USSouth Central USEast AsiaSoutheast AsiaBrazil SouthJapan EastJapan
     WestNorth EuropeWest Europe
    :type region: str
    :param status: Status of the namespace. It can be any of these values:1 =
     Created/Active2 = Creating3 = Suspended4 = Deleting
    :type status: str
    :param created_at: The time the namespace was created.
    :type created_at: datetime
    :param service_bus_endpoint: Endpoint you can use to perform
     NotificationHub operations.
    :type service_bus_endpoint: str
    :param subscription_id: The Id of the Azure subscription associated with
     the namespace.
    :type subscription_id: str
    :param scale_unit: ScaleUnit where the namespace gets created
    :type scale_unit: str
    :param enabled: Whether or not the namespace is currently enabled.
    :type enabled: bool
    :param critical: Whether or not the namespace is set as Critical.
    :type critical: bool
    :param namespace_type: The namespace type. Possible values include:
     'Messaging', 'NotificationHub'
    :type namespace_type: str or :class:`NamespaceType
     <azure.mgmt.notificationhubs.models.NamespaceType>`
    """
    # Client-side constraints — presumably enforced by the msrest
    # (de)serialization machinery inherited via Resource; confirm there.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }
    # Maps Python attribute names to serialized key paths (note the
    # 'properties.*' nesting) and wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'namespace_resource_name': {'key': 'properties.name', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'region': {'key': 'properties.region', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
        'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
        'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
        'scale_unit': {'key': 'properties.scaleUnit', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'critical': {'key': 'properties.critical', 'type': 'bool'},
        'namespace_type': {'key': 'properties.namespaceType', 'type': 'NamespaceType'},
    }
    def __init__(self, location, tags=None, sku=None, namespace_resource_name=None, provisioning_state=None, region=None, status=None, created_at=None, service_bus_endpoint=None, subscription_id=None, scale_unit=None, enabled=None, critical=None, namespace_type=None):
        # id/name/type are server-populated readonly fields handled by the
        # Resource base class; only location/tags/sku are forwarded to it.
        super(NamespaceResource, self).__init__(location=location, tags=tags, sku=sku)
        self.namespace_resource_name = namespace_resource_name
        self.provisioning_state = provisioning_state
        self.region = region
        self.status = status
        self.created_at = created_at
        self.service_bus_endpoint = service_bus_endpoint
        self.subscription_id = subscription_id
        self.scale_unit = scale_unit
        self.enabled = enabled
        self.critical = critical
        self.namespace_type = namespace_type
| mit |
Rudloff/youtube-dl | youtube_dl/extractor/drtv.py | 18 | 4209 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_iso8601,
)
class DRTVIE(InfoExtractor):
    """Extractor for video pages on dr.dk (Danish Broadcasting Corporation)."""
    _VALID_URL = r'https?://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'
    _TEST = {
        'url': 'https://www.dr.dk/tv/se/boern/ultra/panisk-paske/panisk-paske-5',
        'md5': 'dc515a9ab50577fa14cc4e4b0265168f',
        'info_dict': {
            'id': 'panisk-paske-5',
            'ext': 'mp4',
            'title': 'Panisk Påske (5)',
            'description': 'md5:ca14173c5ab24cd26b0fcc074dff391c',
            'timestamp': 1426984612,
            'upload_date': '20150322',
            'duration': 1455,
        },
    }
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Page marker is Danish for "the programme is no longer available".
        if '>Programmet er ikke længere tilgængeligt' in webpage:
            raise ExtractorError(
                'Video %s is not available' % video_id, expected=True)
        # The slug embedded in the page markup is the id understood by
        # the "mu" programcard API below.
        video_id = self._search_regex(
            r'data-(?:material-identifier|episode-slug)="([^"]+)"',
            webpage, 'video id')
        programcard = self._download_json(
            'http://www.dr.dk/mu/programcard/expanded/%s' % video_id,
            video_id, 'Downloading video JSON')
        data = programcard['Data'][0]
        title = data['Title']
        description = data['Description']
        timestamp = parse_iso8601(data['CreatedTime'])
        thumbnail = None
        duration = None
        restricted_to_denmark = False
        formats = []
        subtitles = {}
        for asset in data['Assets']:
            if asset['Kind'] == 'Image':
                thumbnail = asset['Uri']
            elif asset['Kind'] == 'VideoResource':
                # API reports milliseconds; youtube-dl expects seconds.
                duration = asset['DurationInMilliseconds'] / 1000.0
                restricted_to_denmark = asset['RestrictedToDenmark']
                spoken_subtitles = asset['Target'] == 'SpokenSubtitles'
                for link in asset['Links']:
                    uri = link['Uri']
                    target = link['Target']
                    format_id = target
                    preference = None
                    # Deprioritize audio-described ("spoken subtitles") streams.
                    if spoken_subtitles:
                        preference = -1
                        format_id += '-spoken-subtitles'
                    if target == 'HDS':
                        formats.extend(self._extract_f4m_formats(
                            uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
                            video_id, preference, f4m_id=format_id))
                    elif target == 'HLS':
                        formats.extend(self._extract_m3u8_formats(
                            uri, video_id, 'mp4', preference=preference,
                            m3u8_id=format_id))
                    else:
                        # Plain progressive download link.
                        bitrate = link.get('Bitrate')
                        if bitrate:
                            format_id += '-%s' % bitrate
                        formats.append({
                            'url': uri,
                            'format_id': format_id,
                            'tbr': bitrate,
                            'ext': link.get('FileFormat'),
                        })
                subtitles_list = asset.get('SubtitlesList')
                if isinstance(subtitles_list, list):
                    # Map the API's language names to ISO codes; unknown
                    # names fall through unchanged.
                    LANGS = {
                        'Danish': 'da',
                    }
                    for subs in subtitles_list:
                        lang = subs['Language']
                        subtitles[LANGS.get(lang, lang)] = [{'url': subs['Uri'], 'ext': 'vtt'}]
        if not formats and restricted_to_denmark:
            raise ExtractorError(
                'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
tjanez/ansible | lib/ansible/modules/source_control/gitlab_user.py | 10 | 12548 | #!/usr/bin/python
# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gitlab_user
short_description: Creates/updates/deletes Gitlab Users
description:
- When the user does not exists in Gitlab, it will be created.
- When the user does exists and state=absent, the user will be deleted.
- When changes are made to user, the user will be updated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- pyapi-gitlab python module
- administrator rights on the Gitlab server
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- When using https if SSL certificate needs to be verified.
required: false
default: true
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
required: false
default: null
login_password:
description:
- Gitlab password for login_user
required: false
default: null
login_token:
description:
- Gitlab token for logging in.
required: false
default: null
name:
description:
- Name of the user you want to create
required: true
username:
description:
- The username of the user.
required: true
password:
description:
- The password of the user.
required: true
email:
description:
- The email that belongs to the user.
required: true
sshkey_name:
description:
- The name of the sshkey
required: false
default: null
sshkey_file:
description:
- The ssh key itself.
required: false
default: null
group:
description:
- Add user as an member to this group.
required: false
default: null
access_level:
description:
- The access level to the group. One of the following can be used.
- guest
- reporter
- developer
- master
- owner
required: false
default: null
state:
description:
- create or delete group.
- Possible values are present and absent.
required: false
default: present
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: Delete Gitlab User
gitlab_user:
server_url: http://gitlab.example.com
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
username: myusername
state: absent
delegate_to: localhost
- name: Create Gitlab User
gitlab_user:
server_url: https://gitlab.dj-wasabi.local
validate_certs: True
login_user: dj-wasabi
login_password: MySecretPassword
name: My Name
username: myusername
password: mysecretpassword
email: me@example.com
sshkey_name: MySSH
sshkey_file: ssh-rsa AAAAB3NzaC1yc...
state: present
delegate_to: localhost
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
class GitLabUser(object):
    """Wrapper around a pyapi-gitlab connection that creates, updates and
    deletes Gitlab users and manages their group membership.
    Most methods terminate the Ansible run via self._module.exit_json()
    or fail_json().
    """
    # Gitlab access-level names mapped to the numeric values the API
    # expects. Replaces the old if/elif chain that raised
    # UnboundLocalError for any unknown level name.
    ACCESS_LEVELS = {
        'guest': 10,
        'reporter': 20,
        'developer': 30,
        'master': 40,
        'owner': 50,
    }
    def __init__(self, module, git):
        # module: the AnsibleModule driving this run.
        # git: a logged-in gitlab.Gitlab connection.
        self._module = module
        self._gitlab = git
    def addToGroup(self, group_id, user_id, access_level):
        """Add user_id to group_id with the named access level.
        Fails the module run cleanly on an unknown level name.
        """
        try:
            level = self.ACCESS_LEVELS[access_level]
        except KeyError:
            self._module.fail_json(
                msg="Invalid access_level '%s'; must be one of: %s"
                    % (access_level, ", ".join(sorted(self.ACCESS_LEVELS))))
        return self._gitlab.addgroupmember(group_id, user_id, level)
    def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level):
        """Create the user if it does not exist, otherwise update it."""
        group_id = ''
        arguments = {"name": user_name,
                     "username": user_username,
                     "email": user_email}
        if group_name is not None:
            if self.existsGroup(group_name):
                group_id = self.getGroupId(group_name)
        if self.existsUser(user_username):
            self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments)
        else:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments)
    def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Create a new user, attach its ssh key and optional group
        membership; always exits the module run."""
        user_changed = False
        user_username = arguments['username']
        if self._gitlab.createuser(password=user_password, **arguments):
            user_id = self.getUserId(user_username)
            if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
                user_changed = True
            # Add the user to the group if group_id is not empty
            if group_id != '':
                if self.addToGroup(group_id, user_id, access_level):
                    user_changed = True
            user_changed = True
        # Exit with change to true or false
        if user_changed:
            self._module.exit_json(changed=True, result="Created the user")
        else:
            self._module.exit_json(changed=False)
    def deleteUser(self, user_username):
        """Delete the named user and exit the module run."""
        user_id = self.getUserId(user_username)
        if self._gitlab.deleteuser(user_id):
            self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username)
        else:
            self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username)
    def existsGroup(self, group_name):
        """Return True if a group with this exact name exists."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return True
        return False
    def existsUser(self, username):
        """Return True if a user matching this username exists."""
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return True
        return False
    def getGroupId(self, group_name):
        """Return the id of the named group, or None when not found."""
        for group in self._gitlab.getall(self._gitlab.getgroups):
            if group['name'] == group_name:
                return group['id']
    def getUserId(self, username):
        """Return the id of the first matching user, or None when not found."""
        found_user = self._gitlab.getusers(search=username)
        for user in found_user:
            if user['id'] != '':
                return user['id']
    def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments):
        """Bring an existing user in line with 'arguments', (re)attach the
        ssh key and group membership; always exits the module run."""
        user_changed = False
        user_username = arguments['username']
        user_id = self.getUserId(user_username)
        user_data = self._gitlab.getuser(user_id=user_id)
        # Lets check if we need to update the user
        for arg_key, arg_value in arguments.items():
            if user_data[arg_key] != arg_value:
                user_changed = True
        if user_changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._gitlab.edituser(user_id=user_id, **arguments)
            user_changed = True
        if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
            user_changed = True
        if group_id != '':
            if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
                user_changed = True
        if user_changed:
            self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
        else:
            self._module.exit_json(changed=False, result="The user %s is already up2date" % user_username)
def main():
    """Ansible module entry point: parse parameters, connect to Gitlab,
    then create/update/delete the requested user.
    Removed the dead 'global user_id' declaration and the unused
    'use_sshkey'/'add_to_group' flags (the group-name lowercasing that
    lived behind the latter flag is preserved).
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
            login_user=dict(required=False, no_log=True),
            login_password=dict(required=False, no_log=True),
            login_token=dict(required=False, no_log=True),
            name=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            email=dict(required=True),
            sshkey_name=dict(required=False),
            sshkey_file=dict(required=False),
            group=dict(required=False),
            access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
            state=dict(default="present", choices=["present", "absent"]),
        ),
        supports_check_mode=True
    )
    if not HAS_GITLAB_PACKAGE:
        # Fixed the unbalanced parenthesis in this hint message.
        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
    server_url = module.params['server_url']
    verify_ssl = module.params['validate_certs']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_token = module.params['login_token']
    user_name = module.params['name']
    # Usernames are matched case-insensitively throughout.
    user_username = module.params['username'].lower()
    user_password = module.params['password']
    user_email = module.params['email']
    user_sshkey_name = module.params['sshkey_name']
    user_sshkey_file = module.params['sshkey_file']
    group_name = module.params['group']
    access_level = module.params['access_level']
    state = module.params['state']
    # We need both login_user and login_password or login_token, otherwise we fail.
    if login_user is not None and login_password is not None:
        use_credentials = True
    elif login_token is not None:
        use_credentials = False
    else:
        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
    # Group names are matched case-insensitively when membership is requested.
    if group_name is not None and access_level is not None:
        group_name = group_name.lower()
    # Lets make an connection to the Gitlab server_url, with either login_user and login_password
    # or with login_token
    try:
        if use_credentials:
            git = gitlab.Gitlab(host=server_url)
            git.login(user=login_user, password=login_password)
        else:
            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
    user = GitLabUser(module, git)
    # Check if user exists, if not exists and state = absent, we exit nicely.
    if not user.existsUser(user_username) and state == "absent":
        module.exit_json(changed=False, result="User already deleted or does not exists")
    else:
        # User exists,
        if state == "absent":
            user.deleteUser(user_username)
        else:
            user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
| gpl-3.0 |
darkleons/BE | addons/product/partner.py | 385 | 1696 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    # Extends res.partner with a per-partner sale pricelist property field.
    _name = 'res.partner'
    _inherit = 'res.partner'
    _columns = {
        'property_product_pricelist': fields.property(
            type='many2one',
            relation='product.pricelist',
            domain=[('type','=','sale')],
            string="Sale Pricelist",
            help="This pricelist will be used, instead of the default one, for sales to the current partner"),
    }
    def _commercial_fields(self, cr, uid, context=None):
        # The pricelist belongs with the other commercial fields so it
        # propagates from the commercial (invoicing) partner.
        inherited = super(res_partner, self)._commercial_fields(cr, uid, context=context)
        return inherited + ['property_product_pricelist']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_probe_py3.py | 1 | 4647 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayProbe(SubResource):
"""Probe of the application gateway.
:param id: Resource ID.
:type id: str
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayProtocol
:param host: Host name to send the probe to.
:type host: str
:param path: Relative path of probe. Valid path starts from '/'. Probe is
sent to <Protocol>://<host>:<port><path>
:type path: str
:param interval: The probing interval in seconds. This is the time
interval between two consecutive probes. Acceptable values are from 1
second to 86400 seconds.
:type interval: int
:param timeout: the probe timeout in seconds. Probe marked as failed if
valid response is not received with this timeout period. Acceptable values
are from 1 second to 86400 seconds.
:type timeout: int
:param unhealthy_threshold: The probe retry count. Backend server is
marked down after consecutive probe failure count reaches
UnhealthyThreshold. Acceptable values are from 1 second to 20.
:type unhealthy_threshold: int
:param pick_host_name_from_backend_http_settings: Whether the host header
should be picked from the backend http settings. Default value is false.
:type pick_host_name_from_backend_http_settings: bool
:param min_servers: Minimum number of servers that are always marked
healthy. Default value is 0.
:type min_servers: int
:param match: Criterion for classifying a healthy probe response.
:type match:
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayProbeHealthResponseMatch
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host': {'key': 'properties.host', 'type': 'str'},
'path': {'key': 'properties.path', 'type': 'str'},
'interval': {'key': 'properties.interval', 'type': 'int'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
'min_servers': {'key': 'properties.minServers', 'type': 'int'},
'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, protocol=None, host: str=None, path: str=None, interval: int=None, timeout: int=None, unhealthy_threshold: int=None, pick_host_name_from_backend_http_settings: bool=None, min_servers: int=None, match=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
    """Initialize an ApplicationGatewayProbe model.

    All keyword arguments are optional; see the class docstring for the
    meaning of each field.  ``id`` is forwarded to the base resource.
    """
    super(ApplicationGatewayProbe, self).__init__(id=id, **kwargs)
    # Plain attribute copies; wire serialization is driven by the
    # class-level _attribute_map above.
    self.protocol = protocol
    self.host = host
    self.path = path
    self.interval = interval
    self.timeout = timeout
    self.unhealthy_threshold = unhealthy_threshold
    self.pick_host_name_from_backend_http_settings = pick_host_name_from_backend_http_settings
    self.min_servers = min_servers
    self.match = match
    self.provisioning_state = provisioning_state
    self.name = name
    self.etag = etag
    self.type = type
| mit |
AlCutter/certificate-transparency | python/ct/client/db/cert_db.py | 35 | 2480 | import abc
import hashlib
class CertDB(object):
    """Database interface for storing X509 certificate information."""
    # NOTE(review): ``__metaclass__`` is the Python-2 spelling of
    # ``metaclass=abc.ABCMeta``; it has no effect under Python 3.
    __metaclass__ = abc.ABCMeta
    @staticmethod
    def sha256_hash(der_cert):
        """Return the raw SHA-256 digest (bytes) of a DER-encoded cert."""
        return hashlib.sha256(der_cert).digest()
    @abc.abstractmethod
    def store_cert_desc(self, cert_desc, index, log_key):
        """Stores a certificate using its description.

        Args:
            cert_desc: CertificateDescription
            index: position in log
            log_key: log id in LogDB"""
    @abc.abstractmethod
    def store_certs_desc(self, certs, log_key):
        """Store certificates using their descriptions.

        Batched version of store_cert_desc.

        Args:
            certs: iterable of (CertificateDescription, index) tuples
            log_key: log id in LogDB"""
    @abc.abstractmethod
    def get_cert_by_sha256_hash(self, cert_sha256_hash):
        """Fetch a certificate with a matching SHA256 hash.

        Args:
            cert_sha256_hash: the SHA256 hash of the certificate

        Returns:
            A DER-encoded certificate, or None if the cert is not found."""
    @abc.abstractmethod
    def scan_certs(self, limit=0):
        """Scan all certificates.

        Args:
            limit: maximum number of entries to yield. Default is no
                   limit.

        Yields:
            DER-encoded certificates."""
    @abc.abstractmethod
    def scan_certs_by_subject(self, subject_name, limit=0):
        """Scan certificates matching a subject name.

        Args:
            subject_name: a subject name, usually a domain. A scan for
                example.com returns certificates for www.example.com,
                *.example.com, test.mail.example.com, etc. Similarly
                'com' can be used to look for all .com certificates.
                Wildcards are treated as literal characters: a search
                for *.example.com returns certificates for
                *.example.com but not for mail.example.com and vice
                versa.
                Name may also be a common name rather than a DNS name,
                e.g., "Trustworthy Certificate Authority".
            limit: maximum number of entries to yield. Default is no
                   limit.

        Yields:
            DER-encoded certificates."""
| apache-2.0 |
EduPepperPD/pepper2013 | common/lib/xmodule/xmodule/tests/test_content.py | 33 | 1946 | import unittest
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.content import ContentStore
from xmodule.modulestore import Location
class Content:
    """Lightweight stand-in for an asset: just a location and a MIME type."""
    def __init__(self, location, content_type):
        # Store both fields verbatim; no validation is performed here.
        self.location, self.content_type = location, content_type
class ContentTest(unittest.TestCase):
    """Tests for StaticContent URL generation, locations and thumbnails."""
    def test_thumbnail_none(self):
        # Regression: a thumbnail location of None was once coerced into a
        # Location tuple of all-None elements; it must remain plain None.
        with_explicit_nones = StaticContent('loc', 'name', 'content_type', 'data', None, None, None)
        self.assertIsNone(with_explicit_nones.thumbnail_location)
        with_defaults = StaticContent('loc', 'name', 'content_type', 'data')
        self.assertIsNone(with_defaults.thumbnail_location)
    def test_static_url_generation_from_courseid(self):
        # Legacy static URLs must be rewritten onto the /c4x/ asset path.
        converted = StaticContent.convert_legacy_static_url_with_course_id('images_course_image.jpg', 'foo/bar/bz')
        self.assertEqual(converted, '/c4x/foo/bar/asset/images_course_image.jpg')
    def test_generate_thumbnail_image(self):
        # An asset without image content yields no thumbnail data, but still
        # a deterministic thumbnail Location.
        store = ContentStore()
        source = Content(Location(u'c4x', u'mitX', u'800', u'asset', u'monsters__.jpg'), None)
        (thumb_content, thumb_location) = store.generate_thumbnail(source)
        self.assertIsNone(thumb_content)
        self.assertEqual(Location(u'c4x', u'mitX', u'800', u'thumbnail', u'monsters__.jpg'), thumb_location)
    def test_compute_location(self):
        # Double underscores must survive (a bug once collapsed "__" to "_"),
        # while invalid characters such as the space are still replaced.
        computed = StaticContent.compute_location('mitX', '400', 'subs__1eo_jXvZnE .srt.sjson')
        self.assertEqual(Location(u'c4x', u'mitX', u'400', u'asset', u'subs__1eo_jXvZnE_.srt.sjson', None), computed)
| agpl-3.0 |
NeuralEnsemble/neuroConstruct | lib/jython/Lib/codeop.py | 110 | 5243 | r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), that take progam text, a filename and a 'mode'
and:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
# import internals, not guaranteed interface
from org.python.core import Py,CompilerFlags,CompileMode
from org.python.core.CompilerFlags import PyCF_DONT_IMPLY_DEDENT
# public interface
__all__ = ["compile_command", "Compile", "CommandCompiler"]
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and determine whether it is incomplete.

    Arguments:

    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Return value / exceptions raised:

    - Return a code object if the command is complete and valid
    - Return None if the command is incomplete
    - Raise SyntaxError, ValueError or OverflowError if the command is a
      syntax error (OverflowError and ValueError can be produced by
      malformed literals).
    """
    if symbol not in ['single','eval']:
        raise ValueError,"symbol arg must be either single or eval"
    # Translate the mode name into Jython's CompileMode enum and delegate
    # to the Java-level compiler with the default (fresh) compiler flags.
    symbol = CompileMode.getMode(symbol)
    return Py.compile_command_flags(source,filename,symbol,Py.getCompilerFlags(),0)
class Compile:
    """Instances of this class behave much like the built-in compile
    function, but if one is used to compile text containing a future
    statement, it "remembers" and compiles all subsequent program texts
    with the statement in force."""
    def __init__(self):
        # Mutable flags object shared across all calls, so __future__
        # statements persist between compilations (see class docstring).
        self._cflags = CompilerFlags()
    def __call__(self, source, filename, symbol):
        """Compile ``source`` like built-in compile(), with sticky flags."""
        symbol = CompileMode.getMode(symbol)
        return Py.compile_flags(source, filename, symbol, self._cflags)
class CommandCompiler:
    """Instances of this class have __call__ methods identical in
    signature to compile_command; the difference is that if the
    instance compiles program text containing a __future__ statement,
    the instance 'remembers' and compiles all subsequent program texts
    with the statement in force."""
    def __init__(self,):
        # Per-instance flags; updated as sources with __future__
        # statements are compiled (see class docstring).
        self._cflags = CompilerFlags()
    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and determine whether it is incomplete.

        Arguments:

        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Return value / exceptions raised:

        - Return a code object if the command is complete and valid
        - Return None if the command is incomplete
        - Raise SyntaxError, ValueError or OverflowError if the command is a
          syntax error (OverflowError and ValueError can be produced by
          malformed literals).
        """
        if symbol not in ['single','eval']:
            raise ValueError,"symbol arg must be either single or eval"
        # Same path as compile_command(), but with this instance's
        # accumulated compiler flags instead of fresh defaults.
        symbol = CompileMode.getMode(symbol)
        return Py.compile_command_flags(source,filename,symbol,self._cflags,0)
| gpl-2.0 |
kawamon/hue | desktop/core/ext-py/docutils-0.14/test/test_transforms/test_docinfo.py | 2 | 11693 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: test_docinfo.py 8117 2017-06-18 23:38:18Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.frontmatter.DocInfo.
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.frontmatter import DocInfo
from docutils.parsers.rst import Parser
def suite():
    """Build the transform test suite, running the same generator under
    English, German, and Russian language settings."""
    parser = Parser()
    settings = {'language_code': 'en'}
    s = DocutilsTestSupport.TransformTestSuite(
        parser, suite_settings=settings)
    s.generateTests(totest)
    # NOTE(review): the same ``settings`` dict is mutated between
    # generateTests() calls; this assumes the suite snapshots (or re-reads)
    # the settings for each generated test -- TODO confirm.
    settings['language_code'] = 'de'
    s.generateTests(totest_de)
    settings['language_code'] = 'ru'
    s.generateTests(totest_ru)
    return s
# Fixture tables: test-set name -> (transform classes,
# [(rST input, expected pseudo-XML output), ...]), one table per locale.
totest = {}
totest_de = {}
totest_ru = {}
totest['bibliographic_field_lists'] = ((DocInfo,), [
["""\
.. Bibliographic element extraction.
:Abstract:
There can only be one abstract.
It is automatically moved to the end of the other bibliographic elements.
:Author: Me
:Version: 1
:Date: 2001-08-11
:Parameter i: integer
""",
"""\
<document source="test data">
<docinfo>
<author>
Me
<version>
1
<date>
2001-08-11
<field classes="parameter-i">
<field_name>
Parameter i
<field_body>
<paragraph>
integer
<topic classes="abstract">
<title>
Abstract
<paragraph>
There can only be one abstract.
<paragraph>
It is automatically moved to the end of the other bibliographic elements.
<comment xml:space="preserve">
Bibliographic element extraction.
"""],
["""\
.. Bibliographic element extraction.
:Abstract: Abstract 1.
:Author: Me
:Address: 123 My Street
Example, EX
:Contact: me@my.org
:Version: 1
:Abstract: Abstract 2 (should generate a warning).
:Date: 2001-08-11
:Parameter i: integer
""",
"""\
<document source="test data">
<docinfo>
<author>
Me
<address xml:space="preserve">
123 My Street
Example, EX
<contact>
<reference refuri="mailto:me@my.org">
me@my.org
<version>
1
<field>
<field_name>
Abstract
<field_body>
<paragraph>
Abstract 2 (should generate a warning).
<system_message level="2" line="9" source="test data" type="WARNING">
<paragraph>
There can only be one "Abstract" field.
<date>
2001-08-11
<field classes="parameter-i">
<field_name>
Parameter i
<field_body>
<paragraph>
integer
<topic classes="abstract">
<title>
Abstract
<paragraph>
Abstract 1.
<comment xml:space="preserve">
Bibliographic element extraction.
"""],
["""\
:Author: - must be a paragraph
:Status: a *simple* paragraph
:Date: But only one
paragraph.
:Version:
.. and not empty either
""",
"""\
<document source="test data">
<docinfo>
<field>
<field_name>
Author
<field_body>
<bullet_list bullet="-">
<list_item>
<paragraph>
must be a paragraph
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
Cannot extract bibliographic field "Author" containing anything other than a single paragraph.
<status>
a \n\
<emphasis>
simple
paragraph
<field>
<field_name>
Date
<field_body>
<paragraph>
But only one
<paragraph>
paragraph.
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
Cannot extract compound bibliographic field "Date".
<field>
<field_name>
Version
<field_body>
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Cannot extract empty bibliographic field "Version".
<comment xml:space="preserve">
and not empty either
"""],
["""\
:Authors: Me, Myself, **I**
:Authors: PacMan; Ms. PacMan; PacMan, Jr.
:Authors:
Here
There
*Everywhere*
:Authors: - First
- Second
- Third
""",
"""\
<document source="test data">
<docinfo>
<authors>
<author>
Me
<author>
Myself
<author>
I
<authors>
<author>
PacMan
<author>
Ms. PacMan
<author>
PacMan, Jr.
<authors>
<author>
Here
<author>
There
<author>
<emphasis>
Everywhere
<authors>
<author>
First
<author>
Second
<author>
Third
"""],
["""\
:Authors: Only One
:Authors: One, Only;
""",
"""\
<document source="test data">
<docinfo>
<authors>
<author>
Only One
<authors>
<author>
One, Only
"""],
["""\
:Authors:
:Authors: 1. One
2. Two
:Authors:
-
-
:Authors:
- One
Two
:Authors:
- One
Two
""",
"""\
<document source="test data">
<docinfo>
<field>
<field_name>
Authors
<field_body>
<system_message level="2" line="1" source="test data" type="WARNING">
<paragraph>
Cannot extract empty bibliographic field "Authors".
<field>
<field_name>
Authors
<field_body>
<enumerated_list enumtype="arabic" prefix="" suffix=".">
<list_item>
<paragraph>
One
<list_item>
<paragraph>
Two
<system_message level="2" line="3" source="test data" type="WARNING">
<paragraph>
Bibliographic field "Authors" incompatible with extraction: it must contain either a single paragraph (with authors separated by one of ";,"), multiple paragraphs (one per author), or a bullet list with one paragraph (one author) per item.
<field>
<field_name>
Authors
<field_body>
<bullet_list bullet="-">
<list_item>
<list_item>
<system_message level="2" line="6" source="test data" type="WARNING">
<paragraph>
Bibliographic field "Authors" incompatible with extraction: it must contain either a single paragraph (with authors separated by one of ";,"), multiple paragraphs (one per author), or a bullet list with one paragraph (one author) per item.
<field>
<field_name>
Authors
<field_body>
<bullet_list bullet="-">
<list_item>
<paragraph>
One
<paragraph>
Two
<system_message level="2" line="10" source="test data" type="WARNING">
<paragraph>
Bibliographic field "Authors" incompatible with extraction: it must contain either a single paragraph (with authors separated by one of ";,"), multiple paragraphs (one per author), or a bullet list with one paragraph (one author) per item.
<field>
<field_name>
Authors
<field_body>
<bullet_list bullet="-">
<list_item>
<paragraph>
One
<paragraph>
Two
<system_message level="2" line="15" source="test data" type="WARNING">
<paragraph>
Bibliographic field "Authors" incompatible with extraction: it must contain either a single paragraph (with authors separated by one of ";,"), multiple paragraphs (one per author), or a bullet list with one paragraph (one author) per item.
"""],
["""\
.. RCS keyword extraction.
:Status: (some text) $""" + """RCSfile: test_docinfo.py,v $ (more text)
:Date: (some text) $""" + """Date: 2002/10/08 01:34:23 $ (more text)
:Date: (some text) $""" + """Date: 2005-03-26T16:21:28.693201Z $ (more text)
:Version: (some text) $""" + """Revision: 1.1 $ (more text)
""",
"""\
<document source="test data">
<docinfo>
<status>
(some text) test_docinfo.py (more text)
<date>
(some text) 2002-10-08 (more text)
<date>
(some text) 2005-03-26 (more text)
<version>
(some text) 1.1 (more text)
<comment xml:space="preserve">
RCS keyword extraction.
"""],
])
totest_de['bibliographic_field_lists'] = ((DocInfo,), [
[u"""\
.. Bibliographic element extraction for a German document.
:Zusammenfassung: Abstract 1.
:Autor: Me
:Adresse: 123 My Street
Example, EX
:Kontakt: me@my.org
:Version: 1
:Datum: 2001-08-11
:Parameter i: integer
""",
u"""\
<document source="test data">
<docinfo>
<author>
Me
<address xml:space="preserve">
123 My Street
Example, EX
<contact>
<reference refuri="mailto:me@my.org">
me@my.org
<version>
1
<date>
2001-08-11
<field classes="parameter-i">
<field_name>
Parameter i
<field_body>
<paragraph>
integer
<topic classes="abstract">
<title>
Zusammenfassung
<paragraph>
Abstract 1.
<comment xml:space="preserve">
Bibliographic element extraction for a German document.
"""],])
totest_ru['bibliographic_field_lists'] = ((DocInfo,), [
[u"""\
.. Bibliographic element extraction for a Russian document.
:аннотация: Abstract 1.
:автор: Me
:адрес: 123 My Street
Example, EX
:контакт: me@my.org
:версия: 1
:дата: 2001-08-11
:Parameter i: integer
""",
u"""\
<document source="test data">
<docinfo>
<author>
Me
<address xml:space="preserve">
123 My Street
Example, EX
<contact>
<reference refuri="mailto:me@my.org">
me@my.org
<version>
1
<date>
2001-08-11
<field classes="parameter-i">
<field_name>
Parameter i
<field_body>
<paragraph>
integer
<topic classes="abstract">
<title>
Аннотация
<paragraph>
Abstract 1.
<comment xml:space="preserve">
Bibliographic element extraction for a Russian document.
"""],])
# Allow running this module directly: execute suite() under unittest.
if __name__ == '__main__':
    import unittest
    unittest.main(defaultTest='suite')
| apache-2.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/example/fcn-xs/image_segmentaion.py | 9 | 4504 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module encapsulates running image segmentation model for inference.
Example usage:
$ python image_segmentaion.py --input <your JPG image path>
"""
import argparse
import os
import numpy as np
import mxnet as mx
from PIL import Image
def make_file_extension_assertion(extension):
    """Function factory for a file-extension argparse assertion.

    Args:
        extension (string): the file extension to assert, including the
            leading dot (e.g. '.jpg'); compared case-insensitively.

    Returns:
        callable: an argparse ``type=`` function that returns the supplied
        file path unchanged when its extension matches, and raises
        ``argparse.ArgumentTypeError`` otherwise.
    """
    def file_extension_assertion(file_path):
        """Validate one path; return it, or raise ArgumentTypeError."""
        # splitext keeps the leading dot in the extension component.
        ext = os.path.splitext(file_path)[1]
        if ext.lower() != extension:
            raise argparse.ArgumentTypeError('File must have ' + extension + ' extension')
        return file_path
    return file_extension_assertion
def get_palette(num_colors=256):
    """Generate the colormap for visualizing the segmentation mask.

    Each label index is mapped to an (R, G, B) color by spreading the bits
    of the label across the bit planes of the three channels.

    Args:
        num_colors (int): the number of colors to generate in the output palette

    Returns:
        list of int: a flat [R0, G0, B0, R1, G1, B1, ...] palette of length
        ``3 * num_colors``, suitable for PIL's ``Image.putpalette``.
    """
    palette = [0] * (num_colors * 3)
    for j in range(0, num_colors):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        while lab > 0:
            # Take the three lowest label bits and push them into
            # successively less significant bits of R, G and B.
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i = i + 1
            lab >>= 3
    return palette
def get_data(img_path):
    """get the (1, 3, h, w) np.array data for the supplied image

    Args:
        img_path (string): the input image path

    Returns:
        np.array: image data in a (1, 3, h, w) shape
    """
    rgb_mean = np.array([123.68, 116.779, 103.939])  # (R, G, B)
    pixels = np.array(Image.open(img_path), dtype=np.float32)
    # Subtract the per-channel mean (broadcast over height and width).
    pixels = pixels - rgb_mean.reshape(1, 1, 3)
    # (h, w, 3) -> (3, h, w), then add a leading batch axis.
    pixels = pixels.transpose(2, 0, 1)
    return pixels[np.newaxis, :]
def main():
    """Module main execution: load the checkpoint, run one forward pass
    over the input image, and save the colorized segmentation mask."""
    # Initialization variables - update to change your model and execution context
    model_prefix = "FCN8s_VGG16"
    epoch = 19
    # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
    ctx = mx.cpu()
    fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)
    # NOTE: relies on the module-level ``args`` assigned in the __main__ block.
    fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
    data_shape = fcnxs_args["data"].shape
    label_shape = (1, data_shape[2] * data_shape[3])
    fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
    # Bug fix: the auxiliary states loaded from the checkpoint were being
    # ignored -- the argument dict was passed as aux_states instead of
    # fcnxs_auxs, which was otherwise unused.
    executor = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null",
                          aux_states=fcnxs_auxs)
    executor.forward(is_train=False)
    output = executor.outputs[0]
    # Per-pixel argmax over the class axis yields the label mask.
    out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
    out_img = Image.fromarray(out_img)
    out_img.putpalette(get_palette())
    out_img.save(args.output)
if __name__ == "__main__":
    # Handle command line arguments
    parser = argparse.ArgumentParser(description='Run VGG16-FCN-8s to segment an input image')
    parser.add_argument('--input',
                        required=True,
                        type=make_file_extension_assertion('.jpg'),
                        help='The segmentation input JPG image')
    parser.add_argument('--output',
                        default='segmented.png',
                        type=make_file_extension_assertion('.png'),
                        # Bug fix: help text said "putput" instead of "output".
                        help='The segmentation output PNG image')
    args = parser.parse_args()
    main()
| apache-2.0 |
fhasovic/LG-G2-D802-Kernel | android-toolchain/share/gdb/python/gdb/types.py | 137 | 5421 | # Type utilities.
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with gdb.Types."""
import gdb
def get_basic_type(type_):
    """Return the "basic" type of a type.

    Strips const/volatile qualifiers, and peels away references and
    typedefs until the underlying type is reached.

    Arguments:
        type_: The type to reduce to its basic type.

    Returns:
        type_ with const/volatile stripped away,
        and typedefs/references converted to the underlying type.
    """
    while type_.code in (gdb.TYPE_CODE_REF, gdb.TYPE_CODE_TYPEDEF):
        if type_.code == gdb.TYPE_CODE_REF:
            type_ = type_.target()
        else:
            type_ = type_.strip_typedefs()
    return type_.unqualified()
def has_field(type_, field):
    """Return True if a type has the specified field.

    Arguments:
        type_: The type to examine.
            It must be one of gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION.
        field: The name of the field to look up.

    Returns:
        True if the field is present either in type_ or any baseclass.

    Raises:
        TypeError: The type is not a struct or union.
    """
    basic = get_basic_type(type_)
    if basic.code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):
        raise TypeError("not a struct or union")
    # Base classes are searched recursively; ordinary members are matched
    # by name (which may be None for anonymous fields).
    return any(has_field(f.type, field) if f.is_base_class else f.name == field
               for f in basic.fields())
def make_enum_dict(enum_type):
    """Return a dictionary from a program's enum type.

    Arguments:
        enum_type: The enum to compute the dictionary for.

    Returns:
        The dictionary of the enum, mapping enumerator name to value.

    Raises:
        TypeError: The type is not an enum.
    """
    if enum_type.code != gdb.TYPE_CODE_ENUM:
        raise TypeError("not an enum type")
    # Each enumerator's numeric value is stored in its "enumval" attribute.
    return dict((field.name, field.enumval) for field in enum_type.fields())
def deep_items (type_):
    """Return an iterator that recursively traverses anonymous fields.

    Arguments:
        type_: The type to traverse.  It should be one of
        gdb.TYPE_CODE_STRUCT or gdb.TYPE_CODE_UNION.

    Returns:
        an iterator similar to gdb.Type.iteritems(), i.e., it returns
        pairs of key, value, but for any anonymous struct or union
        field that field is traversed recursively, depth-first.
    """
    # NOTE: iteritems() is the Python-2 style API exposed by gdb.Type here.
    for k, v in type_.iteritems ():
        if k:
            # Named field: yield it directly.
            yield k, v
        else:
            # Anonymous struct/union member: recurse into its fields.
            for i in deep_items (v.type):
                yield i
class TypePrinter(object):
    """Base class for type printers, used to substitute type names
    during 'ptype'.

    A type printer needs at least 'name' and 'enabled' attributes and
    must supply an 'instantiate' method.  'instantiate' returns either
    None or an object with a 'recognize' method; 'recognize' accepts a
    gdb.Type and returns either None (not recognized) or a string
    naming the type.
    """
    def __init__(self, name):
        # Printers start out enabled; callers toggle 'enabled' to disable.
        self.name = name
        self.enabled = True
    def instantiate(self):
        # Default implementation produces no recognizer; subclasses override.
        return None
# Helper: append the instantiated recognizers of every enabled printer in
# PLIST to RESULT (disabled printers and None instantiations are skipped).
def _get_some_type_recognizers(result, plist):
    for printer in plist:
        if not printer.enabled:
            continue
        recognizer = printer.instantiate()
        if recognizer is not None:
            result.append(recognizer)
    return None
def get_type_recognizers():
    "Return a list of the enabled type recognizers for the current context."
    recognizers = []
    # Priority order: per-objfile printers first, then the current program
    # space's printers, and finally the global ones.
    for objfile in gdb.objfiles():
        _get_some_type_recognizers(recognizers, objfile.type_printers)
    _get_some_type_recognizers(recognizers,
                               gdb.current_progspace().type_printers)
    _get_some_type_recognizers(recognizers, gdb.type_printers)
    return recognizers
def apply_type_recognizers(recognizers, type_obj):
    """Apply the given list of type recognizers to the type TYPE_OBJ.
    If any recognizer in the list recognizes TYPE_OBJ, returns the name
    given by the recognizer.  Otherwise, this returns None."""
    for recognizer in recognizers:
        candidate = recognizer.recognize(type_obj)
        if candidate is None:
            continue
        # First recognizer to claim the type wins.
        return candidate
    return None
def register_type_printer(locus, printer):
    """Register a type printer.
    PRINTER is the type printer instance.
    LOCUS is either an objfile, a program space, or None, indicating
    global registration."""
    target = gdb if locus is None else locus
    # Newest registrations take precedence, so insert at the front.
    target.type_printers.insert(0, printer)
| gpl-2.0 |
victormlourenco/android_kernel_lge_msm8974 | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
import os
import os.path
import re
import shutil
import subprocess
import sys

from optparse import OptionParser
# Tool version string reported by --version.
version = 'build-all.py, version 0.01'
# All per-target build trees and logs are created under this directory.
build_dir = '../all-kernels'
# Default make goals; extended via the -m/--make-target option.
make_command = ["vmlinux", "modules", "dtbs"]
# Build environment: inherit the caller's env, force an ARM build and
# timestamp-free configs for reproducibility.
make_env = os.environ
make_env.update({
    'ARCH': 'arm',
    'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
# Parsed command-line options; populated in main().
all_options = {}
def error(msg):
    """Print an error message to stderr without terminating."""
    sys.stderr.write("error: %s\n" % (msg,))
def fail(msg):
    """Fail with a user-printed message"""
    # Report via error() so the message carries the "error: " prefix,
    # then terminate with a non-zero exit status.
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # Heuristic: a kernel tree has a MAINTAINERS file, and an MSM tree
    # additionally carries the mach-msm Kconfig.
    looks_like_msm_tree = (os.path.isfile('MAINTAINERS') and
                           os.path.isfile('arch/arm/mach-msm/Kconfig'))
    if not looks_like_msm_tree:
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present.

    Creates ``build_dir`` (with parents) on first use; a concurrent
    creation of the same directory is tolerated.
    """
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Bug fix: ``errno`` was referenced without ever being
            # imported, so any real makedirs failure raised NameError
            # instead of propagating the OSError.
            if exc.errno != errno.EEXIST:
                raise
def update_config(file, str):
    """Append the config option line ``str`` to the defconfig at ``file``.

    NOTE(review): the parameter names shadow the ``file``/``str`` builtins.
    """
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    defconfig_patterns = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'msmkrypton*_defconfig',
    )
    found = {}
    for pattern in defconfig_patterns:
        for path in glob.glob('arch/arm/configs/' + pattern):
            # Strip the trailing "_defconfig" to obtain the target name.
            found[os.path.basename(path)[:-10]] = path
    return found
class Builder:
    """Runs one make invocation, teeing its output into a log file.

    Progress is echoed to stdout: full output with --verbose, otherwise a
    dot per output line (wrapped every 64 dots).
    """
    def __init__(self, logname):
        # Open the log eagerly; run() writes raw subprocess output into it.
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Run ``args`` under make_env; return the process exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read from the raw fd in chunks (unbuffered) rather than iterating
        # lines, so progress appears promptly.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per newline in the chunk; wrap every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
failed_targets = []
def build(target):
    """Configure and build one defconfig target in its own output tree."""
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    staging_dir = 'install_staging'
    modi_dir = '%s' % staging_dir
    hdri_dir = '%s/usr' % staging_dir
    # Clean out any stale staging area from a previous run.
    shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        # Build targets can be dependent upon the completion of previous
        # build targets, so build them one at a time.
        cmd_line = ['make',
            'INSTALL_HDR_PATH=%s' % hdri_dir,
            'INSTALL_MOD_PATH=%s' % modi_dir,
            'O=%s' % dest_dir]
        build_targets = []
        for c in make_command:
            # Leading-dash entries are make options, not goals.
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            # NOTE(review): this local ``build`` shadows the function name.
            build = Builder(log_name)
            result = build.run(cmd_line + [t])
            if result != 0:
                if all_options.keep_going:
                    failed_targets.append(target)
                    fail_or_error = error
                else:
                    fail_or_error = fail
                fail_or_error("Failed to build %s, see %s" %
                    (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
    """Parse command-line options and dispatch the requested builds.

    Mutates the module global ``make_command`` (for --oldconfig,
    --make-target, -j and -l) before handing the selected target list to
    build_many().  Exits via parser.error() or sys.exit() on bad
    arguments or after --list.
    """
    global make_command
    check_kernel()
    check_build()
    # Map of available target names discovered from the tree.
    configs = scan_configs()
    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
                      dest='configs',
                      help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
                      dest='list',
                      help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
                      dest='verbose',
                      help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
                      dest='oldconfig',
                      help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
                      dest='updateconfigs',
                      help="Update defconfigs with provided option setting, "
                      "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
                      help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
                      dest='load_average',
                      help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
                      dest='keep_going', default=False,
                      help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
                      help='Build the indicated make target (default: %s)' %
                      ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Stash parsed options where build()/build_many() can see them.
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)
    # --oldconfig replaces the make targets entirely; --make-target
    # replaces them with the user-supplied list.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)
    # 'all', 'perf' and 'noperf' are pseudo-targets selecting subsets of
    # the discovered configs; anything else must name real targets.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| gpl-2.0 |
sdarji/lpthw | Lib/encodings/cp863.py | 593 | 34508 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP863 codec delegating to the charmap helpers."""

    def encode(self, input, errors='strict'):
        """Encode *input* via the module-level encoding_map."""
        encoded = codecs.charmap_encode(input, errors, encoding_map)
        return encoded

    def decode(self, input, errors='strict'):
        """Decode *input* via the module-level decoding_table."""
        decoded = codecs.charmap_decode(input, errors, decoding_table)
        return decoded
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP863 encoder (the charmap codec keeps no state)."""

    def encode(self, input, final=False):
        # charmap_encode returns (output, length-consumed); only the
        # output is needed here.
        output, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return output
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP863 decoder (the charmap codec keeps no state)."""

    def decode(self, input, final=False):
        # charmap_decode returns (output, length-consumed); only the
        # output is needed here.
        output, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return output
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for CP863; all behaviour is inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for CP863; all behaviour is inherited from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'cp863' encoding."""
    # One stateless Codec instance serves both directions; the original
    # constructed a separate instance per bound method.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp863',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xb6' # 0x0086 -> PILCROW SIGN
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u2017' # 0x008d -> DOUBLE LOW LINE
u'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa7' # 0x008f -> SECTION SIGN
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xa4' # 0x0098 -> CURRENCY SIGN
u'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xa6' # 0x00a0 -> BROKEN BAR
u'\xb4' # 0x00a1 -> ACUTE ACCENT
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xa8' # 0x00a4 -> DIAERESIS
u'\xb8' # 0x00a5 -> CEDILLA
u'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
u'\xaf' # 0x00a7 -> MACRON
u'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x0098, # CURRENCY SIGN
0x00a6: 0x00a0, # BROKEN BAR
0x00a7: 0x008f, # SECTION SIGN
0x00a8: 0x00a4, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00af: 0x00a7, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00a6, # SUPERSCRIPT THREE
0x00b4: 0x00a1, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x0086, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00a5, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x2017: 0x008d, # DOUBLE LOW LINE
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| unlicense |
dennisfrancis/PacketManipulator | umit/pm/gui/core/app.py | 2 | 5811 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008, 2009 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <stack.box@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module contains the PMApp singleton object that gives access to
the all PacketManipulator functionalities
"""
import gtk
import gobject
import os
import sys
from optparse import OptionParser
from umit.pm.core.i18n import _
from umit.pm.core.atoms import Singleton
from umit.pm.core.bus import services_boot, ServiceBus
from umit.pm.gui.core.splash import SplashScreen
from umit.pm.gui.plugins.engine import PluginEngine
from umit.pm.manager.preferencemanager import Prefs
class PMApp(Singleton):
    "The PacketManipulator application singleton object"

    def __init__(self, args):
        """
        PacketManipulator Application class
        @param args pass sys.argv
        """
        gobject.threads_init()

        self._args = args

        # Detect whether we run with admin/root privileges (best effort).
        root = False
        try:
            # FIXME: add maemo
            if sys.platform == 'win32':
                import ctypes
                root = bool(ctypes.windll.shell32.IsUserAnAdmin())
            elif os.getuid() == 0:
                root = True
        except: pass

        if Prefs()['system.check_pyver'].value == True and \
           sys.version_info[1] < 6:

            dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
                                       gtk.MESSAGE_WARNING,
                                       gtk.BUTTONS_YES_NO,
                                       _('Packet Manipulator requires at least '
                                         '2.6 version of Python but you\'ve %s '
                                         'installed. We not guarantee that all '
                                         'functionalities works properly.\n\n'
                                         'Do you want to continue?') % ".".join(
                                             map(str, sys.version_info[:3])))
            ret = dialog.run()
            dialog.hide()
            dialog.destroy()

            if ret == gtk.RESPONSE_NO:
                sys.exit(-1)

        if Prefs()['system.check_root'].value == True and not root:
            dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
                                       gtk.MESSAGE_WARNING,
                                       gtk.BUTTONS_YES_NO,
                                       _('You are running Packet Manipulator as'
                                         ' non-root user!\nSome functionalities'
                                         ' need root privileges to work\n\nDo '
                                         'you want to continue?'))
            ret = dialog.run()
            dialog.hide()
            dialog.destroy()

            if ret == gtk.RESPONSE_NO:
                sys.exit(-1)

        self.phase = 0
        self.splash = SplashScreen()

    def _idle(self):
        """Phased startup callback for gobject.idle_add; returns True to be
        called again, False when startup is complete."""
        if self.phase == 0:
            self.splash.text = _("Registering icons ...")
            from icons import register_icons
            register_icons()
        elif self.phase == 1:
            self.splash.text = _("Loading preferences ...")
            from umit.pm.manager.preferencemanager import Prefs
            self.prefs = Prefs()
        elif self.phase == 2:
            services_boot()

            self.splash.text = _("Creating main window ...")
            from mainwindow import MainWindow

            self.bus = ServiceBus()
            self.main_window = MainWindow()
            self.main_window.connect_tabs_signals()

            self.plugin_engine = PluginEngine()
            self.plugin_engine.load_selected_plugins()

            # Destroy the splash screen
            self.splash.hide()
            self.splash.destroy()
            self.splash.finished = True
            del self.splash

            # Now let's parse the args passed in the constructor
            parser = self._create_parser()
            options, args = parser.parse_args(self._args)

            if options.fread:
                self.main_window.open_generic_file_async(options.fread)

            if options.audit:
                # The audit spec has the form intf1[,intf2][:bpf_filter].
                # The original code referenced an undefined name (`other`)
                # when splitting off the BPF filter, and the bare except
                # silently dropped both the filter and the "intf1:bpf"
                # (no second interface) form.  partition() yields '' for
                # absent parts, matching the previous defaults.
                spec, _, bpf_filter = options.audit.partition(':')
                dev1, _, dev2 = spec.partition(',')

                self.main_window.start_new_audit(dev1, dev2, bpf_filter, False, False)

            return False

        self.phase += 1
        return True

    def run(self):
        """Show the splash screen and enter the GTK main loop."""
        self.splash.show_all()
        gobject.idle_add(self._idle)
        gtk.main()

    def _create_parser(self):
        """
        @return an OptionParser object that can handle the sys.argv passed in
        the constructor.
        """
        opt = OptionParser()
        opt.add_option('-r', None, dest="fread",
                       help="Read packets/sequence from file.")
        opt.add_option('-a', None, dest="audit",
                       help="Start an audit using intf1[,intf2][:bpf_filter]")
        return opt
| gpl-2.0 |
MathYourLife/forall | src/forall/maths.py | 1 | 3364 |
import numpy as np
import sympy as sp
def make(obj, exp):
    """Exercise-generation stub.

    NOTE(review): `range(obj.args)` raises TypeError when obj.args is a
    list (as in Addition), and `arg.random()` would then be called on an
    int.  This looks like work in progress -- probably meant
    `for arg in obj.args` -- confirm intent before use.
    """
    for arg in range(obj.args):
        print(arg.random())
class Addition(object):
    """A two-addend addition problem.

    Addends are assigned via item access (a[0] = 2); the solution is
    recomputed automatically on every assignment and stays None while
    either addend is unset.
    """

    def __init__(self):
        self.args = [None] * 2
        self.solution = None

    def __getitem__(self, key):
        return self.args[key]

    def __setitem__(self, key, value):
        self.args[key] = value
        self.solve()

    def __repr__(self):
        # The original body was `pass`, i.e. returned None, which makes
        # repr() raise "TypeError: __repr__ returned non-string".
        return '<Addition %r + %r = %r>' % (
            self.args[0], self.args[1], self.solution)

    def make(self, addends, solution):
        # Not implemented yet; kept for interface compatibility.
        pass

    def solve(self):
        """Recompute self.solution; leaves it untouched while an addend
        is still None (addition with None raises TypeError)."""
        try:
            self.solution = self.args[0] + self.args[1]
        except TypeError:
            pass

    def expression(self):
        """LaTeX for the bare expression 'a + b'."""
        return '%s + %s' % (sp.latex(self.args[0]), sp.latex(self.args[1]))

    def equation(self, x=None):
        """LaTeX equation 'a + b = [x]'; right-hand side omitted when x
        is None."""
        s = '%s + %s = ' % (sp.latex(self.args[0]), sp.latex(self.args[1]))
        if x is None:
            return s
        s += sp.latex(x)
        return s

    def solved(self):
        """LaTeX equation with the computed solution filled in."""
        return '%s + %s = %s' % (
            sp.latex(self.args[0]), sp.latex(self.args[1]),
            sp.latex(self.solution))
class Random(object):
    """Base random-value strategy; the null strategy draws nothing."""

    def __call__(cls, min, max, dx):
        # Base class produces no value at all.
        return None
class Uniform(Random):
def __call__(cls, min, max, dx):
return np.random.rand() * (max - min) + min
class UniformInteger(Uniform):
def __call__(cls, min, max, dx):
return int(np.random.rand() * (max - min) / dx) * dx + min
class Number(object):
    """Iterable description of a numeric range [min, max) with a pluggable
    random-sampling strategy (see Random and its subclasses).

    Class attributes act as defaults; mutator functions such as
    SingleDigit() override them per instance.
    """

    base = 10      # numeral base used by mutators (e.g. SingleDigit)
    min = None     # inclusive lower bound
    max = None     # exclusive upper bound
    dx = 1.        # legacy name for the increment, kept for compatibility
    # The iteration/sampling methods read self.step, but the original
    # class only defined `dx`, so using them before a mutator assigned
    # `step` raised AttributeError.  Provide a matching default.
    step = 1.
    features = None

    def __init__(self):
        self.features = set()
        self._random = Random()

    def __iter__(self):
        self.n = None
        return self

    def __next__(self):
        if self.n is None:
            self.n = self.min
        else:
            self.n += self.step
        # `>=` rather than `==`: with a float step the exact bound can be
        # skipped, which previously made iteration never terminate.
        if self.n >= self.max:
            raise StopIteration
        return self.n

    def random(self):
        """Draw one value from [min, max) via the configured strategy."""
        return self._random(self.min, self.max, self.step)

    def __repr__(self):
        return '<%s %s-%s>' % (self.__class__.__name__, self.min, self.max)
def SingleDigit(obj=None):
    """Constrain *obj* (a Number) to single-digit values [0, base).

    NOTE(review): the obj=None default is misleading -- calling with no
    argument crashes on obj.min; callers must supply a Number.  Also
    assigns obj.step while the class attribute is named dx -- confirm
    which name Number's iteration is meant to use.
    """
    obj.min = 0
    obj.max = obj.base
    obj.step = 1
    obj.features.add('SingleDigit')
    # Only "upgrade" the sampling strategy when the current one is a
    # UniformInteger ancestor (Random/Uniform); anything else is treated
    # as a programming error.
    if issubclass(UniformInteger, obj._random.__class__):
        obj._random = UniformInteger()
    else:
        raise Exception("Tried to subclass to UniformInteger from %s" % obj._random.__class__.__name__)
    return obj
def addition2(limit):
    """Build a random addition Question with addends drawn from [0, limit).

    Roughly 10% of the time the prompt uses the "more than" wording.
    NOTE(review): relies on a `Question` class defined elsewhere in the
    module (not visible here).
    """
    a = int(np.random.randint(0, limit))
    b = int(np.random.randint(0, limit))
    if np.random.rand() < 0.9:
        ques = "%d + %d = " % (a, b)
    else:
        ques = "%d more than %d = " % (a, b)
    sol = a + b
    q = Question(ques, sol)
    return q
def subtraction(limit):
    """Build a random subtraction Question with operands from [0, limit).

    Operands are ordered so the larger is first, keeping the answer
    non-negative.  ~10% of prompts use the "take ... away" wording.
    """
    a = int(np.random.randint(0, limit))
    b = int(np.random.randint(0, limit))
    # Ensure a >= b so the result is never negative.
    a, b = max([a, b]), min([a, b])
    if np.random.rand() < 0.9:
        ques = "%d - %d = " % (a, b)
    else:
        ques = "Take %d away from %d = " % (b, a)
    sol = a - b
    q = Question(ques, sol)
    return q
def multiplication(limit):
    """Build a random multiplication Question with factors from [0, limit).

    ~10% of prompts use the "groups of" wording.
    """
    a = int(np.random.randint(0, limit))
    b = int(np.random.randint(0, limit))
    if np.random.rand() < 0.9:
        ques = "%d x %d = " % (a, b)
    else:
        ques = "%d groups of %d = " % (a, b)
    sol = a * b
    q = Question(ques, sol)
    return q
| mit |
didrocks/snapcraft | integration_tests/test_zip_source.py | 9 | 1601 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
FileExists
)
import integration_tests
class TarPluginTestCase(integration_tests.TestCase):
    """Integration test: stage a project whose part pulls a zip source.

    NOTE(review): the class is named TarPluginTestCase but exercises the
    'simple-zip' project -- likely copied from a tar test; confirm.
    """

    def test_stage_nil_plugin(self):
        project_dir = 'simple-zip'
        self.run_snapcraft('stage', project_dir)

        # Files extracted from the zip must land in the stage directory.
        expected_files = [
            'top-simple',
            os.path.join('dir-simple', 'sub')
        ]
        for expected_file in expected_files:
            self.assertThat(
                os.path.join(project_dir, 'stage', expected_file),
                FileExists())
        expected_dirs = [
            'dir-simple',
        ]
        for expected_dir in expected_dirs:
            self.assertThat(
                os.path.join(project_dir, 'stage', expected_dir),
                DirExists())

        # Regression test for
        # https://bugs.launchpad.net/snapcraft/+bug/1500728
        self.run_snapcraft('pull', project_dir)
Letractively/timeside | tests/test_analyzer_level.py | 2 | 1278 | #! /usr/bin/env python
from unit_timeside import *
from timeside.decoder import *
from timeside.analyzer.level import Level
class TestAnalyzerLevel(unittest.TestCase):
    """Tests for the Level analyzer.

    Each test method only selects a source file and the expected
    max/RMS dB values; the decode->analyze pipeline is actually run --
    and the assertions made -- in tearDown, which executes after every
    test method.
    """

    def setUp(self):
        # Fresh analyzer for every test.
        self.analyzer = Level()

    def testOnSweep(self):
        "runs on sweep"
        self.source = os.path.join (os.path.dirname(__file__), "samples", "sweep.wav")

        # Expected levels in dB.
        max_level_value = -6.021
        rms_level_value = -9.856

        self.expected = {'level.max':max_level_value , 'level.rms':rms_level_value }

    def testOnGuitar(self):
        "runs on guitar"
        self.source = os.path.join (os.path.dirname(__file__), "samples", "guitar.wav")
        max_level_value = -4.258
        rms_level_value = -21.945
        self.expected = {'level.max':max_level_value , 'level.rms':rms_level_value }

    def tearDown(self):
        # Run the pipeline for the source chosen by the test method,
        # then verify every expected value.
        decoder = FileDecoder(self.source)
        (decoder | self.analyzer).run()
        results = self.analyzer.results
        for key in self.expected.keys():
            self.assertEquals(results[key].data_object.value, self.expected[key])

        #print results
        #print results.to_yaml()
        #print results.to_json()
        #print results.to_xml()
| gpl-2.0 |
jaedb/Iris | mopidy_iris/core.py | 1 | 37373 | import random
import string
import logging
import json
import pykka
import urllib
import os
import sys
import tornado.web
import tornado.ioloop
import time
import pickle
from pkg_resources import parse_version
from tornado.escape import json_encode
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from . import Extension
from .system import IrisSystemThread
if sys.platform == "win32":
import ctypes
# import logger
logger = logging.getLogger(__name__)
class IrisCore(pykka.ThreadingActor):
version = ""
spotify_token = False
queue_metadata = {}
connections = {}
commands = {}
initial_consume = False
radio = {
"enabled": 0,
"seed_artists": [],
"seed_genres": [],
"seed_tracks": [],
"results": [],
}
data = {
"commands": [],
"pinned": [],
}
ioloop = None
    @classmethod
    async def do_fetch(cls, client, request):
        """Await client.fetch(request)."""
        # This wrapper function exists to ease mocking (tests can patch
        # do_fetch instead of tornado's AsyncHTTPClient.fetch).
        return await client.fetch(request)
    def setup(self, config, core):
        """Store the Mopidy config and core API handles for later use."""
        self.config = config
        self.core = core
##
# Mopidy server is starting
##
    def start(self):
        """Called when the Mopidy server starts; restores persisted state."""
        logger.info("Starting Iris " + Extension.version)

        # Load our commands and pinned items from file
        self.data["commands"] = self.load_from_file("commands")
        self.data["pinned"] = self.load_from_file("pinned")
##
# Mopidy is shutting down
##
    def stop(self):
        """Called when the Mopidy server shuts down; log only."""
        logger.info("Stopping Iris")
##
# Load a dict from disk
#
# @param name String
# @return Dict
##
    def load_from_file(self, name):
        """Unpickle <data_dir>/<name>.pkl.

        On any failure (missing file, unreadable pickle) fall back to an
        empty container: [] for "pinned", {} otherwise.
        """
        file_path = Extension.get_data_dir(self.config) / ("%s.pkl" % name)
        try:
            with file_path.open("rb") as f:
                content = pickle.load(f)
                f.close()  # redundant: the with-block already closes f
            return content
        except Exception:
            if name == "pinned":
                return []
            else:
                return {}
##
# Save dict object to disk
#
# @param dict Dict
# @param name String
# @return void
##
def save_to_file(self, dict, name):
file_path = Extension.get_data_dir(self.config) / ("%s.pkl" % name)
try:
with file_path.open("wb") as f:
pickle.dump(dict, f, pickle.HIGHEST_PROTOCOL)
pickle.close()
except Exception:
return False
##
# Generate a random string
#
# Used for connection_ids where none is provided by client
# @return string
##
def generateGuid(self):
return "".join(
random.choices(string.ascii_uppercase + string.digits, k=12)
)
##
# Digest a protocol header into it's id/name parts
#
# @return dict
##
    def digest_protocol(self, protocol):
        """Digest a websocket protocol header into its id/name parts.

        Accepts either a comma-separated string or a list of
        [client_id, connection_id, username]; when the input is invalid
        or incomplete, generates fresh anonymous identifiers and flags
        the result with "generated": True.
        """
        # if we're a string, split into list
        # this handles the different ways we get this passed
        # (select_subprotocols gives string, headers.get gives list)
        if isinstance(protocol, str):

            # make sure we strip any spaces (IE gives "element,element",
            # proper browsers give "element, element")
            protocol = [i.strip() for i in protocol.split(",")]

        # if we've been given a valid array
        try:
            client_id = protocol[0]
            connection_id = protocol[1]
            username = protocol[2]
            generated = False

        # invalid, so just create a default connection, and auto-generate an ID
        except BaseException:
            client_id = self.generateGuid()
            connection_id = self.generateGuid()
            username = "Anonymous"
            generated = True

        # construct our protocol object, and return
        return {
            "client_id": client_id,
            "connection_id": connection_id,
            "username": username,
            "generated": generated,
        }
    def send_message(self, *args, **kwargs):
        """Send a JSON-RPC message to a single websocket connection.

        kwargs:
            data: dict containing "recipient" (a connection_id) plus
                either an "error" payload or method/id/params/result.
            callback: optional; invoked with (response) on success or
                (False, error) on failure.  Without a callback the
                response/error dict is returned instead.
        """
        callback = kwargs.get("callback", None)
        data = kwargs.get("data", None)
        logger.debug(data)

        # Catch invalid recipient
        if data["recipient"] not in self.connections:
            error = 'Connection "' + data["recipient"] + '" not found'
            logger.error(error)
            error = {"message": error}
            # NOTE(review): in the callback branch there is no return, so
            # execution falls through and the send is still attempted
            # (failing into the except below, producing a second error
            # callback) -- confirm whether that is intended.
            if callback:
                callback(False, error)
            else:
                return error

        # Sending of an error
        if "error" in data:
            message = {"jsonrpc": "2.0", "error": data["error"]}

        # Sending of a regular message
        else:
            message = {
                "jsonrpc": "2.0",
                "method": data["method"] if "method" in data else None,
            }
            if "id" in data:
                message["id"] = data["id"]
            if "params" in data:
                message["params"] = data["params"]
            if "result" in data:
                message["result"] = data["result"]

        # Dispatch the message
        try:
            self.connections[data["recipient"]]["connection"].write_message(
                json_encode(message)
            )
            response = {"message": "Sent message to " + data["recipient"]}
            if callback:
                callback(response)
            else:
                return response
        except BaseException:
            error = "Failed to send message to " + data["recipient"]
            logger.error(error)
            error = {"message": error}
            if callback:
                callback(False, error)
            else:
                return error
    def broadcast(self, *args, **kwargs):
        """Send a JSON-RPC notification to every open connection.

        kwargs:
            data: dict with either an "error" payload or method/params;
                an optional "connection_id" marks the origin, which is
                excluded from the broadcast.
            callback: optional; invoked with the summary response, which
                is otherwise returned.
        """
        callback = kwargs.get("callback", None)
        data = kwargs.get("data", None)
        logger.debug(data)

        if "error" in data:
            message = {"jsonrpc": "2.0", "error": data["error"]}
        else:
            message = {
                "jsonrpc": "2.0",
                "method": data["method"] if "method" in data else None,
                "params": data["params"] if "params" in data else None,
            }

        for connection in self.connections.values():
            send_to_this_connection = True

            # Don't send the broadcast to the origin, naturally
            if "connection_id" in data:
                if connection["connection_id"] == data["connection_id"]:
                    send_to_this_connection = False

            if send_to_this_connection:
                connection["connection"].write_message(json_encode(message))

        response = {
            "message": "Broadcast to "
            + str(len(self.connections))
            + " connections"
        }
        if callback:
            callback(response)
        else:
            return response
##
# Connections
#
# Contains all our connections and client details. This requires
# updates when new clients connect, and old ones disconnect. These
# events are broadcast to all current connections
##
def get_connections(self, *args, **kwargs):
callback = kwargs.get("callback", None)
connections = []
for connection in self.connections.values():
connections.append(connection["client"])
response = {"connections": connections}
if callback:
callback(response)
else:
return response
def add_connection(self, *args, **kwargs):
connection = kwargs.get("connection", None)
client = kwargs.get("client", None)
logger.debug("Connection added")
logger.debug(connection)
self.connections[client["connection_id"]] = {
"client": client,
"connection_id": client["connection_id"],
"connection": connection,
}
self.broadcast(
data={
"method": "connection_added",
"params": {"connection": client},
}
)
def update_connection(self, *args, **kwargs):
callback = kwargs.get("callback", None)
data = kwargs.get("data", {})
connection_id = data["connection_id"]
if connection_id in self.connections:
username = data["username"]
client_id = data["client_id"]
self.connections[connection_id]["client"]["username"] = username
self.connections[connection_id]["client"]["client_id"] = client_id
self.broadcast(
data={
"method": "connection_changed",
"params": {
"connection": self.connections[connection_id]["client"]
},
}
)
response = {"connection": self.connections[connection_id]["client"]}
if callback:
callback(response)
else:
return response
else:
error = 'Connection "' + data["connection_id"] + '" not found'
logger.error(error)
error = {"message": error}
if callback:
callback(False, error)
else:
return error
def remove_connection(self, connection_id):
if connection_id in self.connections:
try:
client = self.connections[connection_id]["client"]
del self.connections[connection_id]
self.broadcast(
data={
"method": "connection_removed",
"params": {"connection": client},
}
)
except BaseException:
logger.error("Failed to close connection to " + connection_id)
def set_username(self, *args, **kwargs):
callback = kwargs.get("callback", None)
data = kwargs.get("data", {})
connection_id = data["connection_id"]
if connection_id in self.connections:
username = data["username"]
self.connections[connection_id]["client"]["username"] = username
self.broadcast(
data={
"method": "connection_changed",
"params": {
"connection": self.connections[connection_id]["client"]
},
}
)
response = {
"connection_id": connection_id,
"username": data["username"],
}
if callback:
callback(response)
else:
return response
else:
error = 'Connection "' + data["connection_id"] + '" not found'
logger.error(error)
error = {"message": error}
if callback:
callback(False, error)
else:
return error
##
# System controls
#
# Faciitates upgrades and configuration fetching
##
def get_config(self, *args, **kwargs):
callback = kwargs.get("callback", False)
# handle config setups where there is no username/password
# Iris won't work properly anyway, but at least we won't get server
# errors
if "spotify" in self.config and "username" in self.config["spotify"]:
spotify_username = self.config["spotify"]["username"]
else:
spotify_username = False
response = {
"config": {
"is_root": self.is_root(),
"spotify_username": spotify_username,
"country": self.config["iris"]["country"],
"locale": self.config["iris"]["locale"],
"spotify_authorization_url": self.config["iris"][
"spotify_authorization_url"
],
"lastfm_authorization_url": self.config["iris"][
"lastfm_authorization_url"
],
"genius_authorization_url": self.config["iris"][
"genius_authorization_url"
],
}
}
if callback:
callback(response)
else:
return response
async def get_version(self, *args, **kwargs):
callback = kwargs.get("callback", False)
url = "https://pypi.python.org/pypi/Mopidy-Iris/json"
http_client = AsyncHTTPClient()
try:
http_response = await http_client.fetch(url)
response_body = json.loads(http_response.body)
latest_version = response_body["info"]["version"]
current_version = Extension.version
# compare our versions, and convert result to boolean
upgrade_available = parse_version(latest_version) > parse_version(
current_version
)
upgrade_available = upgrade_available == 1
except (urllib.request.HTTPError, urllib.request.URLError):
latest_version = "0.0.0"
upgrade_available = False
response = {
"version": {
"current": current_version,
"latest": latest_version,
"is_root": self.is_root(),
"upgrade_available": upgrade_available,
}
}
if callback:
callback(response)
else:
return response
##
# Restart Mopidy
# This requires sudo access to system.sh
##
def restart(self, *args, **kwargs):
callback = kwargs.get("callback", False)
ioloop = kwargs.get("ioloop", False)
# Trigger the action
IrisSystemThread("restart", ioloop, self.restart_callback).start()
self.broadcast(data={"method": "restart_started"})
response = {"message": "Restart started"}
if callback:
callback(response)
else:
return response
def restart_callback(self, response, error, update):
if error:
self.broadcast(data={"method": "restart_error", "params": error})
elif update:
self.broadcast(data={"method": "restart_updated", "params": update})
else:
self.broadcast(
data={"method": "restart_finished", "params": response}
)
##
# Run an upgrade of Iris
##
def upgrade(self, *args, **kwargs):
callback = kwargs.get("callback", False)
ioloop = kwargs.get("ioloop", False)
self.broadcast(data={"method": "upgrade_started"})
# Trigger the action
IrisSystemThread("upgrade", ioloop, self.upgrade_callback).start()
response = {"message": "Upgrade started"}
if callback:
callback(response)
else:
return response
def upgrade_callback(self, response, error, update):
if error:
self.broadcast(data={"method": "upgrade_error", "params": error})
elif update:
self.broadcast(data={"method": "upgrade_updated", "params": update})
else:
self.broadcast(
data={"method": "upgrade_finished", "params": response}
)
self.restart()
##
# Run a mopidy local scan
# Essetially an alias to "mopidyctl local scan"
##
def local_scan(self, *args, **kwargs):
callback = kwargs.get("callback", False)
ioloop = kwargs.get("ioloop", False)
# Trigger the action
IrisSystemThread("local_scan", ioloop, self.local_scan_callback).start()
self.broadcast(data={"method": "local_scan_started"})
response = {"message": "Local scan started"}
if callback:
callback(response)
else:
return response
def local_scan_callback(self, response, error, update):
if error:
self.broadcast(data={"method": "local_scan_error", "params": error})
elif update:
self.broadcast(
data={"method": "local_scan_updated", "params": update}
)
else:
self.broadcast(
data={"method": "local_scan_finished", "params": response}
)
##
# Spotify Radio
#
# Accepts seed URIs and creates radio-like experience. When our
# tracklist is nearly empty, we fetch more recommendations. This
# can result in duplicates. We keep the recommendations limit low
# to avoid timeouts and slow UI
##
def get_radio(self, *args, **kwargs):
callback = kwargs.get("callback", False)
response = {"radio": self.radio}
if callback:
callback(response)
else:
return response
async def change_radio(self, *args, **kwargs):
callback = kwargs.get("callback", False)
data = kwargs.get("data", {})
# We're starting a new radio (or forced restart)
if data["reset"] or not self.radio["enabled"]:
starting = True
self.initial_consume = self.core.tracklist.get_consume().get()
else:
starting = False
# fetch more tracks from Mopidy-Spotify
self.radio = {
"seed_artists": data["seed_artists"],
"seed_genres": data["seed_genres"],
"seed_tracks": data["seed_tracks"],
"enabled": 1,
"results": [],
}
uris = await self.load_more_tracks()
# make sure we got recommendations
if uris:
if starting:
self.core.tracklist.clear()
self.core.tracklist.set_consume(True)
# We only want to play the first batch
added = self.core.tracklist.add(uris=uris[0:3])
if not added.get():
logger.error("No recommendations added to queue")
self.radio["enabled"] = 0
error = {
"message": "No recommendations added to queue",
"radio": self.radio,
}
if callback:
callback(False, error)
else:
return error
# Save results (minus first batch) for later use
self.radio["results"] = uris[3:]
self.add_radio_metadata(added)
if starting:
self.core.playback.play()
self.broadcast(
data={
"method": "radio_started",
"params": {"radio": self.radio},
}
)
else:
self.broadcast(
data={
"method": "radio_changed",
"params": {"radio": self.radio},
}
)
self.get_radio(callback=callback)
return
# Failed fetching/adding tracks, so no-go
else:
logger.error("No recommendations returned by Spotify")
self.radio["enabled"] = 0
error = {
"code": 32500,
"message": "Could not start radio",
"data": {"radio": self.radio},
}
if callback:
callback(False, error)
else:
return error
    def stop_radio(self, *args, **kwargs):
        """Disable radio mode: reset seeds/results, restore the pre-radio
        consume setting, stop playback and broadcast radio_stopped."""
        callback = kwargs.get("callback", False)

        self.radio = {
            "enabled": 0,
            "seed_artists": [],
            "seed_genres": [],
            "seed_tracks": [],
            "results": [],
        }

        # restore initial consume state
        self.core.tracklist.set_consume(self.initial_consume)
        self.core.playback.stop()

        self.broadcast(
            data={"method": "radio_stopped", "params": {"radio": self.radio}}
        )

        response = {"message": "Stopped radio"}
        if callback:
            callback(response)
        else:
            return response
async def load_more_tracks(self, *args, **kwargs):
logger.info("Loading more radio tracks from Spotify")
try:
await self.get_spotify_token()
spotify_token = self.spotify_token
access_token = spotify_token["access_token"]
except BaseException:
error = "IrisFrontend: access_token missing or invalid"
logger.error(error)
return False
url = "https://api.spotify.com/v1/recommendations/"
url = (
url
+ "?seed_artists="
+ (",".join(self.radio["seed_artists"])).replace(
"spotify:artist:", ""
)
)
url = (
url
+ "&seed_genres="
+ (",".join(self.radio["seed_genres"])).replace(
"spotify:genre:", ""
)
)
url = (
url
+ "&seed_tracks="
+ (",".join(self.radio["seed_tracks"])).replace(
"spotify:track:", ""
)
)
url = url + "&limit=50"
http_client = AsyncHTTPClient()
try:
http_response = await http_client.fetch(
url, "POST", headers={"Authorization": "Bearer " + access_token}
)
response_body = json.loads(http_response.body)
uris = []
for track in response_body["tracks"]:
uris.append(track["uri"])
return uris
except (urllib.error.HTTPError, urllib.error.URLError) as e:
error = json.loads(e.read())
error_response = {
"message": "Could not fetch Spotify recommendations: "
+ error["error_description"]
}
logger.error(
"Could not fetch Spotify recommendations: "
+ error["error_description"]
)
logger.debug(error_response)
return False
async def check_for_radio_update(self):
tracklistLength = self.core.tracklist.get_length().get()
if tracklistLength < 3 and self.radio["enabled"] == 1:
# Grab our loaded tracks
uris = self.radio["results"]
# We've run out of pre-fetched tracks, so we need to get more
# recommendations
if len(uris) < 3:
uris = await self.load_more_tracks()
# Remove the next batch, and update our results
self.radio["results"] = uris[3:]
# Only add the next set of uris
uris = uris[0:3]
added = self.core.tracklist.add(uris=uris)
self.add_radio_metadata(added)
def add_radio_metadata(self, added):
seeds = ""
if len(self.radio["seed_artists"]) > 0:
seeds = seeds + (",".join(self.radio["seed_artists"])).replace(
"spotify:artist:", "spotify_artist_"
)
if len(self.radio["seed_tracks"]) > 0:
if seeds != "":
seeds = seeds + ","
seeds = seeds + (",".join(self.radio["seed_tracks"])).replace(
"spotify:track:", "spotify_track_"
)
if len(self.radio["seed_genres"]) > 0:
if seeds != "":
seeds = seeds + ","
seeds = seeds + (",".join(self.radio["seed_genres"])).replace(
"spotify:genre:", "spotify_genre_"
)
metadata = {
"tlids": [],
"added_by": "Radio",
"added_from": "iris:radio:" + seeds,
}
for added_tltrack in added.get():
metadata["tlids"].append(added_tltrack.tlid)
self.add_queue_metadata(data=metadata)
##
# Additional queue metadata
#
# This maps tltracks with extra info for display in Iris, including
# added_by and from_uri.
##
def get_queue_metadata(self, *args, **kwargs):
callback = kwargs.get("callback", False)
response = {"queue_metadata": self.queue_metadata}
if callback:
callback(response)
else:
return response
def add_queue_metadata(self, *args, **kwargs):
callback = kwargs.get("callback", False)
data = kwargs.get("data", {})
for tlid in data["tlids"]:
item = {
"tlid": tlid,
"added_from": data["added_from"]
if "added_from" in data
else None,
"added_by": data["added_by"] if "added_by" in data else None,
}
self.queue_metadata["tlid_" + str(tlid)] = item
self.broadcast(
data={
"method": "queue_metadata_changed",
"params": {"queue_metadata": self.queue_metadata},
}
)
response = {"message": "Added queue metadata"}
if callback:
callback(response)
else:
return response
def clean_queue_metadata(self, *args, **kwargs):
cleaned_queue_metadata = {}
for tltrack in self.core.tracklist.get_tl_tracks().get():
# if we have metadata for this track, push it through to cleaned
# dictionary
if "tlid_" + str(tltrack.tlid) in self.queue_metadata:
cleaned_queue_metadata[
"tlid_" + str(tltrack.tlid)
] = self.queue_metadata["tlid_" + str(tltrack.tlid)]
self.queue_metadata = cleaned_queue_metadata
##
# Server-side data assets
#
# These functions are used internally to store data locally for all users to access
##
    def get_data(self, name, *args, **kwargs):
        """Return (or deliver via callback) the server-side dataset *name*
        (e.g. "commands" or "pinned")."""
        callback = kwargs.get("callback", False)
        response = {name: self.data[name]}
        if callback:
            callback(response)
        else:
            return response
    def set_data(self, name, *args, **kwargs):
        """Replace dataset *name*, persist it to disk and broadcast a
        "<name>_changed" notification to every connection."""
        callback = kwargs.get("callback", False)
        data = kwargs.get("data", {})

        # Update our temporary variable
        self.data[name] = data[name]

        # Save the new commands to file storage
        self.save_to_file(self.data[name], name)

        self.broadcast(
            data={
                "method": f"{name}_changed",
                "params": {name: self.data[name]},
            }
        )

        response = {"message": f"Saved {name}"}
        if callback:
            callback(response)
        else:
            return response
##
# Pinned assets
##
    def get_pinned(self, *args, **kwargs):
        """Return the server-stored pinned items."""
        return self.get_data("pinned", *args, **kwargs)
    def set_pinned(self, *args, **kwargs):
        """Replace and persist the pinned items."""
        return self.set_data("pinned", *args, **kwargs)
##
# Commands
##
    def get_commands(self, *args, **kwargs):
        """Return the server-stored custom commands."""
        return self.get_data("commands", *args, **kwargs)
    def set_commands(self, *args, **kwargs):
        """Replace and persist the custom commands."""
        return self.set_data("commands", *args, **kwargs)
async def run_command(self, *args, **kwargs):
callback = kwargs.get("callback", False)
data = kwargs.get("data", {})
error = False
if str(data["id"]) not in self.data["commands"]:
error = {
"message": "Command failed",
"description": "Could not find command by ID "
+ '"'
+ str(data["id"])
+ '"',
}
else:
command = self.data["commands"][str(data["id"])]
if "method" not in command:
error = {
"message": "Command failed",
"description": 'Missing required property "method"',
}
if "url" not in command:
error = {
"message": "Command failed",
"description": 'Missing required property "url"',
}
logger.debug("Running command " + str(command))
if error:
if callback:
callback(False, error)
return
else:
return error
# Build headers dict if additional headers are given
headers = None
if "additional_headers" in command:
d = command["additional_headers"].split("\n")
lines = list(filter(lambda x: x.find(":") > 0, d))
fields = [
(x.split(":", 1)[0].strip().lower(), x.split(":", 1)[1].strip())
for x in lines
]
headers = dict(fields)
if command["method"] == "POST":
if (
"content-type" in headers
and headers["content-type"].lower() != "application/json"
):
post_data = command["post_data"]
else:
post_data = json.dumps(command["post_data"])
request = HTTPRequest(
command["url"],
connect_timeout=5,
method="POST",
body=post_data,
validate_cert=False,
headers=headers,
)
else:
request = HTTPRequest(
command["url"],
connect_timeout=5,
validate_cert=False,
headers=headers,
)
# Make the request, and handle any request errors
try:
http_client = AsyncHTTPClient()
command_response = await http_client.fetch(request)
except Exception as e:
error = {"message": "Command failed", "description": str(e)}
if callback:
callback(False, error)
return
else:
return error
# Attempt to parse body as JSON
try:
command_response_body = json.loads(command_response.body)
except BaseException:
# Perhaps it requires unicode encoding?
try:
command_response_body = tornado.escape.to_unicode(
command_response.body
)
except BaseException:
command_response_body = ""
# Finally, return the result
response = {"message": "Command run", "response": command_response_body}
if callback:
callback(response)
return
else:
return response
##
# Spotify authentication
#
# Uses the Client Credentials Flow, so is invisible to the user.
# We need this token for any backend spotify requests (we don't tap in
# to Mopidy-Spotify, yet). Also used for passing token to frontend for
# javascript requests without use of the Authorization Code Flow.
##
    async def get_spotify_token(self, *args, **kwargs):
        """Return (or deliver via callback) a client-credentials Spotify
        token, refreshing it first when absent or expired."""
        callback = kwargs.get("callback", False)

        # Expired, so go get a new one
        if (
            not self.spotify_token
            or self.spotify_token["expires_at"] <= time.time()
        ):
            await self.refresh_spotify_token()

        response = {"spotify_token": self.spotify_token}
        if callback:
            callback(response)
        else:
            return response
async def refresh_spotify_token(self, *args, **kwargs):
callback = kwargs.get("callback", None)
try:
# Use client_id and client_secret from config
# This was introduced in Mopidy-Spotify 3.1.0
url = "https://auth.mopidy.com/spotify/token"
data = {
"client_id": self.config["spotify"]["client_id"],
"client_secret": self.config["spotify"]["client_secret"],
"grant_type": "client_credentials",
}
except (Exception):
error = {
"message": "Could not refresh Spotify token: invalid configuration"
}
if callback:
callback(False, error)
else:
return error
try:
http_client = tornado.httpclient.AsyncHTTPClient()
request = tornado.httpclient.HTTPRequest(
url, method="POST", body=urllib.parse.urlencode(data)
)
response = await self.do_fetch(http_client, request)
token = json.loads(response.body)
token["expires_at"] = time.time() + token["expires_in"]
self.spotify_token = token
self.broadcast(
data={
"method": "spotify_token_changed",
"params": {"spotify_token": self.spotify_token},
}
)
response = {"spotify_token": token}
if callback:
callback(response)
else:
return response
except (urllib.error.HTTPError, urllib.error.URLError) as e:
error = json.loads(e.read())
error = {
"message": "Could not refresh Spotify token: "
+ error["error_description"]
}
if callback:
callback(False, error)
else:
return error
##
# Detect if we're running as root
##
def is_root(self):
if sys.platform == "win32":
return ctypes.windll.shell32.IsUserAnAdmin() != 0
else:
return os.geteuid() == 0
    ##
    # Genius lyrics
    #
    # Fetches a lyrics page from genius.com on behalf of an authorised
    # frontend connection. (The previous banner here was a copy-paste of
    # the Spotify authentication header and did not describe get_lyrics.)
    ##
async def get_lyrics(self, *args, **kwargs):
callback = kwargs.get("callback", False)
request = kwargs.get("request", False)
error = False
url = ""
try:
path = request.get_argument("path")
url = "https://genius.com" + path
except Exception as e:
logger.error(e)
error = {"message": "Path not valid", "description": str(e)}
try:
connection_id = request.get_argument("connection_id")
if connection_id not in self.connections:
error = {
"message": "Unauthorized request",
"description": "Connection "
+ connection_id
+ " not connected",
}
except Exception as e:
logger.error(e)
error = {
"message": "Unauthorized request",
"description": "connection_id missing",
}
if error:
return error
try:
http_client = AsyncHTTPClient()
http_response = await http_client.fetch(url)
callback(
http_response.body.decode("utf-8", errors="replace"), False
)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
error = json.loads(e.read())
error = {
"message": "Could not fetch Genius lyrics: "
+ error["error_description"]
}
logger.error(
"Could not fetch Genius lyrics: " + error["error_description"]
)
logger.debug(error)
return error
##
# Simple test method to debug access to system tasks
##
def test(self, *args, **kwargs):
callback = kwargs.get("callback", False)
ioloop = kwargs.get("ioloop", False)
self.broadcast(data={"method": "test_started"})
response = {"message": "Running test... please wait"}
if callback:
callback(response)
else:
return response
IrisSystemThread("test", ioloop, self.test_callback).run()
def test_callback(self, response, error, update):
if error:
self.broadcast(data={"method": "test_error", "params": error})
elif error:
self.broadcast(data={"method": "test_updated", "params": update})
else:
self.broadcast(data={"method": "test_finished", "params": response})
| apache-2.0 |
hdm-dt-fb/rvt_model_services | commands/sync_benchmark/rps_sync_benchmark.py | 1 | 3827 | import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import OpenOptions, SynchronizeWithCentralOptions, DetachFromCentralOption, RelinquishOptions
from Autodesk.Revit.DB import TransactWithCentralOptions
from Autodesk.Revit.DB import FilePath
from Autodesk.Revit.DB import WorksetConfiguration, WorksetConfigurationOption
import System
import os
import sys
from datetime import datetime
from collections import defaultdict
# --- Benchmark configuration -------------------------------------------
# Number of open/sync/close cycles to time.
iterations = 10
# iteration index -> duration of that cycle in seconds
timing_map = defaultdict(float)
time_now = str(datetime.now())
info = ""
# RevitPythonShell/pyRevit injects the Revit application as __revit__.
app = __revit__.Application
benchmark_topic = os.environ.get("RVT_SYNC_BENCHMARK") or ""
machine_name = os.environ.get('COMPUTERNAME') or ""
if "RVT_QC_PATH" not in os.environ:
    print("no model specified")
    sys.exit()
# --- Detect the active network interface -------------------------------
# Create a throwaway UDP socket towards a public address; no packet is
# sent, but the OS chooses the outgoing interface, whose description we
# record for the benchmark log.
active_nic = ""
test_ip = "9.9.9.9"
udp_conn = System.Net.Sockets.UdpClient(test_ip, 1)
local_addr = udp_conn.Client.LocalEndPoint.Address
for nic in System.Net.NetworkInformation.NetworkInterface.GetAllNetworkInterfaces():
    ip_props = nic.GetIPProperties()
    for addr_info in ip_props.UnicastAddresses:
        if local_addr.ToString() == addr_info.Address.ToString():
            active_nic = nic.Description
# --- Model path and Revit open/sync options ----------------------------
project = os.environ["RVT_QC_PRJ"]
model_path = os.environ["RVT_QC_PATH"]
pc_stats = os.environ["pc_stats"]
rvt_path = FilePath(model_path)
# Open with all worksets closed so timings are comparable across runs.
ws_conf = WorksetConfiguration(WorksetConfigurationOption.CloseAllWorksets)
open_opt = OpenOptions()
open_opt.SetOpenWorksetsConfiguration(ws_conf)
sync_opt = SynchronizeWithCentralOptions()
relinquish_opt = RelinquishOptions(True)
sync_opt.SetRelinquishOptions(relinquish_opt)
sync_opt.SaveLocalAfter = True
# sync_opt.Compact = True
sync_opt.Comment = "syncing"
trans_opt = TransactWithCentralOptions()
print(time_now)
print("machine stats:\n{}".format(pc_stats))
print(active_nic)
print("timing: {} {} times".format(model_path, iterations))
# --- Timed benchmark loop ----------------------------------------------
# Each iteration opens the central model, syncs it with central and
# closes it again, recording the wall-clock time of the full cycle.
for i in range(iterations):
    start = datetime.now()
    print("__{:2}: start: {}".format(i, start))
    doc = app.OpenDocumentFile(rvt_path, open_opt)
    print("  {:2}: openend: {}".format(i, str(datetime.now())))
    doc.SynchronizeWithCentral(trans_opt, sync_opt)
    print("  {:2}: synced: {}".format(i, str(datetime.now())))
    doc.Close()
    end = datetime.now()
    print("  {:2}: closed: {}".format(i, str(end)))
    timing_result = end - start
    timing_map[i] = timing_result.total_seconds()
    print("  {:2}: single run duration: {}".format(i, str(timing_result.total_seconds())))
# --- Report and persist results ----------------------------------------
print(35*"=")
print("iter:seconds")
for iteration, timing in timing_map.items():
    print("{}: {}".format(str(iteration).zfill(4), timing))
print(35*"=")
print("average timing:")
average = sum(timing_map.values()) / iterations
print("{} seconds".format(average))
# Assemble a single semicolon-separated csv row describing this run.
log_info = "{};".format(time_now)
log_info += "{}:{};".format(app.VersionNumber, app.VersionBuild)
model_path = os.environ["RVT_QC_PATH"]
# NOTE(review): under IronPython 2 this is integer division, so the size
# is truncated to whole megabytes — confirm that is intended.
file_size = str(int(os.path.getsize(model_path))/1000000)
log_dir = os.environ.get("RVT_LOG_PATH")
project = os.environ.get("RVT_QC_PRJ")
pc_stats = os.environ.get("pc_stats")
log_info += "{};".format(file_size)
log_info += pc_stats
log_info += "average seconds:{};".format(average)
log_info += "iterations:{};".format(iterations)
if log_dir:
    # One csv per machine/project/topic holding the aggregate row ...
    log_file = os.path.join(
        log_dir,
        "{}_{}_benchmark_{}.csv".format(
            machine_name, project, benchmark_topic
        )
    )
    with open(log_file, "a") as csv_file:
        csv_file.write(log_info + "\n")
    # ... and a second csv with one row per iteration.
    log_file = os.path.join(
        log_dir,
        "{}_{}_benchmark_{}_single_iteration_timing_.csv".format(
            machine_name,
            project,
            benchmark_topic
        )
    )
    with open(log_file, "a") as csv_file:
        for iter_num, timing in timing_map.items():
            csv_file.write("{};{};{}\n".format(time_now, iter_num, timing))
| mit |
artefactual/archivematica-history | src/archivematicaCommon/lib/externals/pyes/rivers.py | 2 | 3822 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
# For Python < 2.6 or people using a newer version of simplejson
import simplejson
json = simplejson
except ImportError:
# For Python >= 2.6
import json
from .es import ES
class River(object):
    """Base class for ElasticSearch river definitions.

    Subclasses provide :meth:`serialize` returning the type-specific
    payload; :attr:`q` augments that payload with the common index
    settings (name, target index/type, bulk options).
    """

    def __init__(self, index_name=None, index_type=None, bulk_size=100, bulk_timeout=None):
        # The river name mirrors the index name, as in the original API.
        self.name = index_name
        self.index_name = index_name
        self.index_type = index_type
        self.bulk_size = bulk_size
        self.bulk_timeout = bulk_timeout

    @property
    def q(self):
        """Full river document: serialized payload plus index settings."""
        document = self.serialize()
        index_settings = {}
        for key, value in (
            ('name', self.name),
            ('index', self.index_name),
            ('type', self.index_type),
            ('bulk_size', self.bulk_size),
            ('bulk_timeout', self.bulk_timeout),
        ):
            # Only truthy settings are emitted, matching the original.
            if value:
                index_settings[key] = value
        if index_settings:
            document['index'] = index_settings
        return document

    def __repr__(self):
        return str(self.q)

    def to_json(self):
        """Serialize the river document with the pyes JSON encoder."""
        return json.dumps(self.q, cls=ES.encoder)

    def serialize(self):
        """Return the type-specific payload; subclasses must override."""
        raise NotImplementedError
class RabbitMQRiver(River):
    """River that indexes documents consumed from a RabbitMQ queue."""

    type = "rabbitmq"

    def __init__(self, host="localhost", port=5672, user="guest",
                 password="guest", vhost="/", queue="es", exchange="es",
                 routing_key="es", **kwargs):
        super(RabbitMQRiver, self).__init__(**kwargs)
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.vhost = vhost
        self.queue = queue
        self.exchange = exchange
        self.routing_key = routing_key

    def serialize(self):
        """Return the rabbitmq river payload (note the "pass" key name
        expected by the plugin)."""
        connection = {
            "host": self.host,
            "port": self.port,
            "user": self.user,
            "pass": self.password,
            "vhost": self.vhost,
            "queue": self.queue,
            "exchange": self.exchange,
            "routing_key": self.routing_key,
        }
        return {"type": self.type, self.type: connection}
class TwitterRiver(River):
    """River that indexes statuses from the Twitter stream."""

    type = "twitter"

    def __init__(self, user, password, **kwargs):
        super(TwitterRiver, self).__init__(**kwargs)
        self.user = user
        self.password = password

    def serialize(self):
        """Return the twitter river payload with the account credentials."""
        credentials = {
            "user": self.user,
            "password": self.password,
        }
        return {"type": self.type, self.type: credentials}
class CouchDBRiver(River):
    """River that replicates documents from a CouchDB database."""

    type = "couchdb"

    def __init__(self, host="localhost", port=5984, db="mydb", filter=None,
                 filter_params=None, script=None, user=None, password=None,
                 **kwargs):
        super(CouchDBRiver, self).__init__(**kwargs)
        self.host = host
        self.port = port
        self.db = db
        self.filter = filter
        self.filter_params = filter_params
        self.script = script
        self.user = user
        self.password = password

    def serialize(self):
        """Return the couchdb river payload; optional settings are only
        included when they were explicitly provided."""
        payload = {
            "host": self.host,
            "port": self.port,
            "db": self.db,
            "filter": self.filter,
        }
        for attr in ("filter_params", "script", "user", "password"):
            value = getattr(self, attr)
            if value is not None:
                payload[attr] = value
        return {"type": self.type, self.type: payload}
| agpl-3.0 |
bouncestorage/swift | test/unit/account/test_backend.py | 3 | 63378 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.account.backend """
from collections import defaultdict
import hashlib
import json
import unittest
import pickle
import os
from time import sleep, time
from uuid import uuid4
from tempfile import mkdtemp
from shutil import rmtree
import sqlite3
import itertools
from contextlib import contextmanager
import random
from swift.account.backend import AccountBroker
from swift.common.utils import Timestamp
from test.unit import patch_policies, with_tempdir, make_timestamp_iter
from swift.common.db import DatabaseConnectionError
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit.common.test_db import TestExampleBroker
@patch_policies
class TestAccountBroker(unittest.TestCase):
"""Tests for AccountBroker"""
    def test_creation(self):
        """A broker on a nonexistent DB raises DatabaseConnectionError
        until initialize() creates the schema."""
        # Test AccountBroker.__init__
        broker = AccountBroker(':memory:', account='a')
        self.assertEqual(broker.db_file, ':memory:')
        try:
            with broker.get() as conn:
                pass
        except DatabaseConnectionError as e:
            self.assertTrue(hasattr(e, 'path'))
            self.assertEqual(e.path, ':memory:')
            self.assertTrue(hasattr(e, 'msg'))
            self.assertEqual(e.msg, "DB doesn't exist")
        except Exception as e:
            self.fail("Unexpected exception raised: %r" % e)
        else:
            self.fail("Expected a DatabaseConnectionError exception")
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            curs = conn.cursor()
            curs.execute('SELECT 1')
            self.assertEqual(curs.fetchall()[0][0], 1)
    def test_exception(self):
        """The broker discards its cached connection after an exception
        escapes the get() context manager."""
        # Test AccountBroker throwing a conn away after exception
        first_conn = None
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            first_conn = conn
        try:
            with broker.get() as conn:
                # A clean exit should have cached the same connection
                self.assertEqual(first_conn, conn)
                raise Exception('OMG')
        except Exception:
            pass
        self.assertTrue(broker.conn is None)
    def test_empty(self):
        """empty() flips to False when a live container exists and back to
        True once that container is deleted."""
        # Test AccountBroker.empty
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        self.assertTrue(broker.empty())
        broker.put_container('o', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        self.assertTrue(not broker.empty())
        # sleep so the delete timestamp is strictly newer than the put
        sleep(.00001)
        broker.put_container('o', 0, Timestamp(time()).internal, 0, 0,
                             POLICIES.default.idx)
        self.assertTrue(broker.empty())
    def test_is_status_deleted(self):
        """is_status_deleted() is True after delete_db() and also when a
        newer delete timestamp merges in over the put timestamp."""
        # Test AccountBroker.is_status_deleted
        broker1 = AccountBroker(':memory:', account='a')
        broker1.initialize(Timestamp(time()).internal)
        self.assertTrue(not broker1.is_status_deleted())
        broker1.delete_db(Timestamp(time()).internal)
        self.assertTrue(broker1.is_status_deleted())
        broker2 = AccountBroker(':memory:', account='a')
        broker2.initialize(Timestamp(time()).internal)
        # Set delete_timestamp greater than put_timestamp
        broker2.merge_timestamps(
            time(), Timestamp(time()).internal,
            Timestamp(time() + 999).internal)
        self.assertTrue(broker2.is_status_deleted())
    def test_reclaim(self):
        """reclaim() purges container rows deleted before the reclaim age
        but never removes live rows or rows deleted too recently."""
        broker = AccountBroker(':memory:', account='test_account')
        broker.initialize(Timestamp('1').internal)
        broker.put_container('c', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 1)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 0)
        # reclaim with an old threshold: the live row must survive
        broker.reclaim(Timestamp(time() - 999).internal, time())
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 1)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 0)
        sleep(.00001)
        # delete the container (delete timestamp newer than put)
        broker.put_container('c', 0, Timestamp(time()).internal, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 1)
        # threshold older than the delete: tombstone row is kept
        broker.reclaim(Timestamp(time() - 999).internal, time())
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 1)
        sleep(.00001)
        # threshold now past the delete: tombstone row is purged
        broker.reclaim(Timestamp(time()).internal, time())
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 0)
        # Test reclaim after deletion. Create 3 test containers
        broker.put_container('x', 0, 0, 0, 0, POLICIES.default.idx)
        broker.put_container('y', 0, 0, 0, 0, POLICIES.default.idx)
        broker.put_container('z', 0, 0, 0, 0, POLICIES.default.idx)
        broker.reclaim(Timestamp(time()).internal, time())
        # Now delete the account
        broker.delete_db(Timestamp(time()).internal)
        broker.reclaim(Timestamp(time()).internal, time())
    def test_delete_db_status(self):
        """delete_db() stamps delete_timestamp and status_changed_at while
        leaving the original put_timestamp untouched."""
        # monotonically increasing timestamps, one per next() call
        ts = (Timestamp(t).internal for t in itertools.count(int(time())))
        start = next(ts)
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(start)
        info = broker.get_info()
        self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
        self.assertTrue(Timestamp(info['created_at']) >= start)
        self.assertEqual(info['delete_timestamp'], '0')
        # a legacy-schema subclass reuses this test; pre-metadata DBs did
        # not record status_changed_at on initialize
        if self.__class__ == TestAccountBrokerBeforeMetadata:
            self.assertEqual(info['status_changed_at'], '0')
        else:
            self.assertEqual(info['status_changed_at'],
                             Timestamp(start).internal)
        # delete it
        delete_timestamp = next(ts)
        broker.delete_db(delete_timestamp)
        info = broker.get_info()
        self.assertEqual(info['put_timestamp'], Timestamp(start).internal)
        self.assertTrue(Timestamp(info['created_at']) >= start)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertEqual(info['status_changed_at'], delete_timestamp)
    def test_delete_container(self):
        """Putting a newer delete timestamp marks the container row as
        deleted."""
        # Test AccountBroker.delete_container
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        broker.put_container('o', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 1)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 0)
        # sleep so the delete timestamp is strictly newer than the put
        sleep(.00001)
        broker.put_container('o', 0, Timestamp(time()).internal, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 0").fetchone()[0], 0)
            self.assertEqual(conn.execute(
                "SELECT count(*) FROM container "
                "WHERE deleted = 1").fetchone()[0], 1)
    def test_put_container(self):
        """put_container() keeps the newest put/delete timestamps: newer
        events win, older (stale) events are ignored."""
        # Test AccountBroker.put_container
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        # Create initial container
        timestamp = Timestamp(time()).internal
        broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
        # Reput same event
        broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
        # Put new event
        sleep(.00001)
        timestamp = Timestamp(time()).internal
        broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
        # Put old event: must not roll the put_timestamp back
        otimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
        broker.put_container('"{<container \'&\' name>}"', otimestamp, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
        # Put old delete event: recorded but row stays live
        dtimestamp = Timestamp(float(Timestamp(timestamp)) - 1).internal
        broker.put_container('"{<container \'&\' name>}"', 0, dtimestamp, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT delete_timestamp FROM container").fetchone()[0],
                dtimestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
        # Put new delete event
        sleep(.00001)
        timestamp = Timestamp(time()).internal
        broker.put_container('"{<container \'&\' name>}"', 0, timestamp, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT delete_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 1)
        # Put new event: resurrects the container
        sleep(.00001)
        timestamp = Timestamp(time()).internal
        broker.put_container('"{<container \'&\' name>}"', timestamp, 0, 0, 0,
                             POLICIES.default.idx)
        with broker.get() as conn:
            self.assertEqual(conn.execute(
                "SELECT name FROM container").fetchone()[0],
                '"{<container \'&\' name>}"')
            self.assertEqual(conn.execute(
                "SELECT put_timestamp FROM container").fetchone()[0],
                timestamp)
            self.assertEqual(conn.execute(
                "SELECT deleted FROM container").fetchone()[0], 0)
    def test_get_info(self):
        """get_info() reports identity fields and keeps container_count in
        step with container puts and deletes."""
        # Test AccountBroker.get_info
        broker = AccountBroker(':memory:', account='test1')
        broker.initialize(Timestamp('1').internal)
        info = broker.get_info()
        self.assertEqual(info['account'], 'test1')
        self.assertEqual(info['hash'], '00000000000000000000000000000000')
        self.assertEqual(info['put_timestamp'], Timestamp(1).internal)
        self.assertEqual(info['delete_timestamp'], '0')
        # legacy-schema subclass reuses this test (see test_delete_db_status)
        if self.__class__ == TestAccountBrokerBeforeMetadata:
            self.assertEqual(info['status_changed_at'], '0')
        else:
            self.assertEqual(info['status_changed_at'], Timestamp(1).internal)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 0)
        broker.put_container('c1', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 1)
        sleep(.00001)
        broker.put_container('c2', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 2)
        sleep(.00001)
        # re-putting an existing container must not change the count
        broker.put_container('c2', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 2)
        sleep(.00001)
        broker.put_container('c1', 0, Timestamp(time()).internal, 0, 0,
                             POLICIES.default.idx)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 1)
        sleep(.00001)
        broker.put_container('c2', 0, Timestamp(time()).internal, 0, 0,
                             POLICIES.default.idx)
        info = broker.get_info()
        self.assertEqual(info['container_count'], 0)
    def test_list_containers_iter(self):
        """Exercises listing with limit, marker, end_marker, prefix and
        delimiter, including delimiter roll-up rows."""
        # Test AccountBroker.list_containers_iter
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        # 4 x 125 containers '0-0000'..'3-0124', plus extra names that
        # exercise delimiter handling around '2-0051' and '3-XXXX-0049'
        for cont1 in range(4):
            for cont2 in range(125):
                broker.put_container('%d-%04d' % (cont1, cont2),
                                     Timestamp(time()).internal, 0, 0, 0,
                                     POLICIES.default.idx)
        for cont in range(125):
            broker.put_container('2-0051-%04d' % cont,
                                 Timestamp(time()).internal, 0, 0, 0,
                                 POLICIES.default.idx)
        for cont in range(125):
            broker.put_container('3-%04d-0049' % cont,
                                 Timestamp(time()).internal, 0, 0, 0,
                                 POLICIES.default.idx)
        # plain limit
        listing = broker.list_containers_iter(100, '', None, None, '')
        self.assertEqual(len(listing), 100)
        self.assertEqual(listing[0][0], '0-0000')
        self.assertEqual(listing[-1][0], '0-0099')
        # end_marker truncates the listing
        listing = broker.list_containers_iter(100, '', '0-0050', None, '')
        self.assertEqual(len(listing), 50)
        self.assertEqual(listing[0][0], '0-0000')
        self.assertEqual(listing[-1][0], '0-0049')
        # marker resumes after the given name
        listing = broker.list_containers_iter(100, '0-0099', None, None, '')
        self.assertEqual(len(listing), 100)
        self.assertEqual(listing[0][0], '0-0100')
        self.assertEqual(listing[-1][0], '1-0074')
        listing = broker.list_containers_iter(55, '1-0074', None, None, '')
        self.assertEqual(len(listing), 55)
        self.assertEqual(listing[0][0], '1-0075')
        self.assertEqual(listing[-1][0], '2-0004')
        # prefix with and without a delimiter
        listing = broker.list_containers_iter(10, '', None, '0-01', '')
        self.assertEqual(len(listing), 10)
        self.assertEqual(listing[0][0], '0-0100')
        self.assertEqual(listing[-1][0], '0-0109')
        listing = broker.list_containers_iter(10, '', None, '0-01', '-')
        self.assertEqual(len(listing), 10)
        self.assertEqual(listing[0][0], '0-0100')
        self.assertEqual(listing[-1][0], '0-0109')
        listing = broker.list_containers_iter(10, '', None, '0-', '-')
        self.assertEqual(len(listing), 10)
        self.assertEqual(listing[0][0], '0-0000')
        self.assertEqual(listing[-1][0], '0-0009')
        # delimiter without prefix rolls names up to their first segment
        listing = broker.list_containers_iter(10, '', None, '', '-')
        self.assertEqual(len(listing), 4)
        self.assertEqual([row[0] for row in listing],
                         ['0-', '1-', '2-', '3-'])
        listing = broker.list_containers_iter(10, '2-', None, None, '-')
        self.assertEqual(len(listing), 1)
        self.assertEqual([row[0] for row in listing], ['3-'])
        listing = broker.list_containers_iter(10, '', None, '2', '-')
        self.assertEqual(len(listing), 1)
        self.assertEqual([row[0] for row in listing], ['2-'])
        listing = broker.list_containers_iter(10, '2-0050', None, '2-', '-')
        self.assertEqual(len(listing), 10)
        self.assertEqual(listing[0][0], '2-0051')
        self.assertEqual(listing[1][0], '2-0051-')
        self.assertEqual(listing[2][0], '2-0052')
        self.assertEqual(listing[-1][0], '2-0059')
        listing = broker.list_containers_iter(10, '3-0045', None, '3-', '-')
        self.assertEqual(len(listing), 10)
        self.assertEqual([row[0] for row in listing],
                         ['3-0045-', '3-0046', '3-0046-', '3-0047',
                          '3-0047-', '3-0048', '3-0048-', '3-0049',
                          '3-0049-', '3-0050'])
        # a real container whose name ends in the delimiter
        broker.put_container('3-0049-', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        listing = broker.list_containers_iter(10, '3-0048', None, None, None)
        self.assertEqual(len(listing), 10)
        self.assertEqual([row[0] for row in listing],
                         ['3-0048-0049', '3-0049', '3-0049-', '3-0049-0049',
                          '3-0050', '3-0050-0049', '3-0051', '3-0051-0049',
                          '3-0052', '3-0052-0049'])
        listing = broker.list_containers_iter(10, '3-0048', None, '3-', '-')
        self.assertEqual(len(listing), 10)
        self.assertEqual([row[0] for row in listing],
                         ['3-0048-', '3-0049', '3-0049-', '3-0050',
                          '3-0050-', '3-0051', '3-0051-', '3-0052',
                          '3-0052-', '3-0053'])
        listing = broker.list_containers_iter(10, None, None, '3-0049-', '-')
        self.assertEqual(len(listing), 2)
        self.assertEqual([row[0] for row in listing],
                         ['3-0049-', '3-0049-0049'])
    def test_double_check_trailing_delimiter(self):
        """Delimiter listings stay correct when container names themselves
        end with the delimiter character."""
        # Test AccountBroker.list_containers_iter for an
        # account that has an odd container with a trailing delimiter
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        broker.put_container('a', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('a-', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('a-a', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('a-a-a', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('a-a-b', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('a-b', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('b', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('b-a', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('b-b', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('c', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        # no delimiter: all ten real names
        listing = broker.list_containers_iter(15, None, None, None, None)
        self.assertEqual(len(listing), 10)
        self.assertEqual([row[0] for row in listing],
                         ['a', 'a-', 'a-a', 'a-a-a', 'a-a-b', 'a-b', 'b',
                          'b-a', 'b-b', 'c'])
        # delimiter roll-up at the top level
        listing = broker.list_containers_iter(15, None, None, '', '-')
        self.assertEqual(len(listing), 5)
        self.assertEqual([row[0] for row in listing],
                         ['a', 'a-', 'b', 'b-', 'c'])
        listing = broker.list_containers_iter(15, None, None, 'a-', '-')
        self.assertEqual(len(listing), 4)
        self.assertEqual([row[0] for row in listing],
                         ['a-', 'a-a', 'a-a-', 'a-b'])
        listing = broker.list_containers_iter(15, None, None, 'b-', '-')
        self.assertEqual(len(listing), 2)
        self.assertEqual([row[0] for row in listing], ['b-a', 'b-b'])
    def test_chexor(self):
        """The account hash is the byte-wise XOR of the md5 of each
        container's "name-put-delete-objects-bytes" record."""
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        broker.put_container('a', Timestamp(1).internal,
                             Timestamp(0).internal, 0, 0,
                             POLICIES.default.idx)
        broker.put_container('b', Timestamp(2).internal,
                             Timestamp(0).internal, 0, 0,
                             POLICIES.default.idx)
        hasha = hashlib.md5(
            '%s-%s' % ('a', "%s-%s-%s-%s" % (
                Timestamp(1).internal, Timestamp(0).internal, 0, 0))
        ).digest()
        hashb = hashlib.md5(
            '%s-%s' % ('b', "%s-%s-%s-%s" % (
                Timestamp(2).internal, Timestamp(0).internal, 0, 0))
        ).digest()
        hashc = \
            ''.join(('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
        self.assertEqual(broker.get_info()['hash'], hashc)
        # updating a container replaces its contribution to the hash
        broker.put_container('b', Timestamp(3).internal,
                             Timestamp(0).internal, 0, 0,
                             POLICIES.default.idx)
        hashb = hashlib.md5(
            '%s-%s' % ('b', "%s-%s-%s-%s" % (
                Timestamp(3).internal, Timestamp(0).internal, 0, 0))
        ).digest()
        hashc = \
            ''.join(('%02x' % (ord(a) ^ ord(b)) for a, b in zip(hasha, hashb)))
        self.assertEqual(broker.get_info()['hash'], hashc)
    def test_merge_items(self):
        """merge_items() replicates container rows between brokers via
        get_items_since()/get_sync()."""
        broker1 = AccountBroker(':memory:', account='a')
        broker1.initialize(Timestamp('1').internal)
        broker2 = AccountBroker(':memory:', account='a')
        broker2.initialize(Timestamp('1').internal)
        broker1.put_container('a', Timestamp(1).internal, 0, 0, 0,
                              POLICIES.default.idx)
        broker1.put_container('b', Timestamp(2).internal, 0, 0, 0,
                              POLICIES.default.idx)
        # NOTE(review): `id` shadows the builtin; harmless in this test
        id = broker1.get_info()['id']
        broker2.merge_items(broker1.get_items_since(
            broker2.get_sync(id), 1000), id)
        items = broker2.get_items_since(-1, 1000)
        self.assertEqual(len(items), 2)
        self.assertEqual(['a', 'b'], sorted([rec['name'] for rec in items]))
        # an incremental merge picks up only the new row
        broker1.put_container('c', Timestamp(3).internal, 0, 0, 0,
                              POLICIES.default.idx)
        broker2.merge_items(broker1.get_items_since(
            broker2.get_sync(id), 1000), id)
        items = broker2.get_items_since(-1, 1000)
        self.assertEqual(len(items), 3)
        self.assertEqual(['a', 'b', 'c'],
                         sorted([rec['name'] for rec in items]))
    def test_merge_items_overwrite_unicode(self):
        """Merging a newer row for a unicode-named container overwrites
        the older stats instead of duplicating the row (records survive a
        JSON round-trip as in real replication)."""
        snowman = u'\N{SNOWMAN}'.encode('utf-8')
        broker1 = AccountBroker(':memory:', account='a')
        broker1.initialize(Timestamp('1').internal, 0)
        id1 = broker1.get_info()['id']
        broker2 = AccountBroker(':memory:', account='a')
        broker2.initialize(Timestamp('1').internal, 0)
        broker1.put_container(snowman, Timestamp(2).internal, 0, 1, 100,
                              POLICIES.default.idx)
        broker1.put_container('b', Timestamp(3).internal, 0, 0, 0,
                              POLICIES.default.idx)
        broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
            broker2.get_sync(id1), 1000))), id1)
        # newer stats for the snowman container
        broker1.put_container(snowman, Timestamp(4).internal, 0, 2, 200,
                              POLICIES.default.idx)
        broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
            broker2.get_sync(id1), 1000))), id1)
        items = broker2.get_items_since(-1, 1000)
        self.assertEqual(['b', snowman],
                         sorted([rec['name'] for rec in items]))
        items_by_name = dict((rec['name'], rec) for rec in items)
        self.assertEqual(items_by_name[snowman]['object_count'], 2)
        self.assertEqual(items_by_name[snowman]['bytes_used'], 200)
        self.assertEqual(items_by_name['b']['object_count'], 0)
        self.assertEqual(items_by_name['b']['bytes_used'], 0)
    def test_load_old_pending_puts(self):
        """A pending-file record without a storage policy index (written
        by a pre-storage-policy broker) commits with policy index 0."""
        # pending puts from pre-storage-policy account brokers won't contain
        # the storage policy index
        tempdir = mkdtemp()
        broker_path = os.path.join(tempdir, 'test-load-old.db')
        try:
            broker = AccountBroker(broker_path, account='real')
            broker.initialize(Timestamp(1).internal)
            with open(broker_path + '.pending', 'a+b') as pending:
                pending.write(':')
                pending.write(pickle.dumps(
                    # name, put_timestamp, delete_timestamp, object_count,
                    # bytes_used, deleted
                    ('oldcon', Timestamp(200).internal,
                     Timestamp(0).internal,
                     896, 9216695, 0)).encode('base64'))
            broker._commit_puts()
            with broker.get() as conn:
                results = list(conn.execute('''
                    SELECT name, storage_policy_index FROM container
                '''))
            self.assertEqual(len(results), 1)
            self.assertEqual(dict(results[0]),
                             {'name': 'oldcon', 'storage_policy_index': 0})
        finally:
            rmtree(tempdir)
    @patch_policies([StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', True),
                     StoragePolicy(2, 'two', False),
                     StoragePolicy(3, 'three', False)])
    def test_get_policy_stats(self):
        """get_policy_stats() tracks per-policy container/object/byte
        totals through creates, updates and deletes."""
        ts = (Timestamp(t).internal for t in itertools.count(int(time())))
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(next(ts))
        # check empty policy_stats
        self.assertTrue(broker.empty())
        policy_stats = broker.get_policy_stats()
        self.assertEqual(policy_stats, {})
        # add some empty containers
        for policy in POLICIES:
            container_name = 'c-%s' % policy.name
            put_timestamp = next(ts)
            broker.put_container(container_name,
                                 put_timestamp, 0,
                                 0, 0,
                                 policy.idx)
            policy_stats = broker.get_policy_stats()
            stats = policy_stats[policy.idx]
            # container_count is only present on schemas that track it
            if 'container_count' in stats:
                self.assertEqual(stats['container_count'], 1)
            self.assertEqual(stats['object_count'], 0)
            self.assertEqual(stats['bytes_used'], 0)
        # update the containers object & byte count
        for policy in POLICIES:
            container_name = 'c-%s' % policy.name
            put_timestamp = next(ts)
            count = policy.idx * 100  # good as any integer
            broker.put_container(container_name,
                                 put_timestamp, 0,
                                 count, count,
                                 policy.idx)
            policy_stats = broker.get_policy_stats()
            stats = policy_stats[policy.idx]
            if 'container_count' in stats:
                self.assertEqual(stats['container_count'], 1)
            self.assertEqual(stats['object_count'], count)
            self.assertEqual(stats['bytes_used'], count)
        # check all the policy_stats at once
        for policy_index, stats in policy_stats.items():
            policy = POLICIES[policy_index]
            count = policy.idx * 100  # coupled with policy for test
            if 'container_count' in stats:
                self.assertEqual(stats['container_count'], 1)
            self.assertEqual(stats['object_count'], count)
            self.assertEqual(stats['bytes_used'], count)
        # now delete the containers one by one
        for policy in POLICIES:
            container_name = 'c-%s' % policy.name
            delete_timestamp = next(ts)
            broker.put_container(container_name,
                                 0, delete_timestamp,
                                 0, 0,
                                 policy.idx)
            policy_stats = broker.get_policy_stats()
            stats = policy_stats[policy.idx]
            if 'container_count' in stats:
                self.assertEqual(stats['container_count'], 0)
            self.assertEqual(stats['object_count'], 0)
            self.assertEqual(stats['bytes_used'], 0)
    @patch_policies([StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', True)])
    def test_policy_stats_tracking(self):
        # Repeated puts of the same container must *replace* its stats in
        # policy_stat (not accumulate), and stats are tracked per policy.
        ts = (Timestamp(t).internal for t in itertools.count(int(time())))
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(next(ts))

        # policy 0
        broker.put_container('con1', next(ts), 0, 12, 2798641, 0)
        broker.put_container('con1', next(ts), 0, 13, 8156441, 0)
        # policy 1
        broker.put_container('con2', next(ts), 0, 7, 5751991, 1)
        broker.put_container('con2', next(ts), 0, 8, 6085379, 1)
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 2)
        # only the most recent put of each container should be reflected;
        # 'container_count' is guarded for pre-migration subclass runs
        if 'container_count' in stats[0]:
            self.assertEqual(stats[0]['container_count'], 1)
        self.assertEqual(stats[0]['object_count'], 13)
        self.assertEqual(stats[0]['bytes_used'], 8156441)
        if 'container_count' in stats[1]:
            self.assertEqual(stats[1]['container_count'], 1)
        self.assertEqual(stats[1]['object_count'], 8)
        self.assertEqual(stats[1]['bytes_used'], 6085379)

        # Break encapsulation here to make sure that there's only 2 rows in
        # the stats table. It's possible that there could be 4 rows (one per
        # put_container) but that they came out in the right order so that
        # get_policy_stats() collapsed them down to the right number. To prove
        # that's not so, we have to go peek at the broker's internals.
        with broker.get() as conn:
            nrows = conn.execute(
                "SELECT COUNT(*) FROM policy_stat").fetchall()[0][0]
        self.assertEqual(nrows, 2)
def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):
    """
    The AccountBroker initialize() function before we added the
    policy stat table. Used by test_policy_table_creation() to
    make sure that the AccountBroker will correctly add the table
    for cases where the DB existed before the policy support was added.

    Intended to be monkeypatched in as AccountBroker._initialize, hence
    the ``self`` parameter on a module-level function.

    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    if not self.account:
        raise ValueError(
            'Attempting to create a new database with no account set')
    self.create_container_table(conn)
    self.create_account_stat_table(conn, put_timestamp)
def premetadata_create_account_stat_table(self, conn, put_timestamp):
    """
    Copied from AccountBroker before the metadata column was
    added; used for testing with TestAccountBrokerBeforeMetadata.

    Create account_stat table which is specific to the account DB.

    Intended to be monkeypatched in as
    AccountBroker.create_account_stat_table.

    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    # NOTE: this schema deliberately omits the 'metadata' column so tests
    # can exercise the upgrade path that adds it.
    conn.executescript('''
        CREATE TABLE account_stat (
            account TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            container_count INTEGER,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );

        INSERT INTO account_stat (container_count) VALUES (0);
    ''')

    conn.execute('''
        UPDATE account_stat SET account = ?, created_at = ?, id = ?,
               put_timestamp = ?
        ''', (self.account, Timestamp(time()).internal, str(uuid4()),
              put_timestamp))
class TestCommonAccountBroker(TestExampleBroker):
    """
    Run the generic broker test suite against AccountBroker.

    put_item/delete_item are the hooks TestExampleBroker calls to create
    and remove records; here they map onto container rows using one
    randomly chosen storage policy per test run.
    """

    broker_class = AccountBroker

    def setUp(self):
        super(TestCommonAccountBroker, self).setUp()
        # any single policy is as good as another for the common tests
        self.policy = random.choice(list(POLICIES))

    def put_item(self, broker, timestamp):
        broker.put_container('test', timestamp, 0, 0, 0,
                             int(self.policy))

    def delete_item(self, broker, timestamp):
        # a put with a delete_timestamp acts as a delete
        broker.put_container('test', 0, timestamp, 0, 0,
                             int(self.policy))
class TestAccountBrokerBeforeMetadata(TestAccountBroker):
    """
    Tests for AccountBroker against databases created before
    the metadata column was added.
    """

    def setUp(self):
        # swap in the pre-metadata schema creator, then sanity-check that
        # a freshly initialized DB really lacks the metadata column
        self._imported_create_account_stat_table = \
            AccountBroker.create_account_stat_table
        AccountBroker.create_account_stat_table = \
            premetadata_create_account_stat_table
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        exc = None
        with broker.get() as conn:
            try:
                conn.execute('SELECT metadata FROM account_stat')
            except BaseException as err:
                exc = err
        self.assertTrue('no such column: metadata' in str(exc))

    def tearDown(self):
        # restore the real schema creator and verify a new DB now has
        # the metadata column
        AccountBroker.create_account_stat_table = \
            self._imported_create_account_stat_table
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            conn.execute('SELECT metadata FROM account_stat')
def prespi_create_container_table(self, conn):
    """
    Copied from AccountBroker before the storage_policy_index column was
    added; used for testing with TestAccountBrokerBeforeSPI.

    Create container table which is specific to the account DB.

    Intended to be monkeypatched in as AccountBroker.create_container_table.

    :param conn: DB connection object
    """
    conn.executescript("""
        CREATE TABLE container (
            ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            put_timestamp TEXT,
            delete_timestamp TEXT,
            object_count INTEGER,
            bytes_used INTEGER,
            deleted INTEGER DEFAULT 0
        );

        CREATE INDEX ix_container_deleted_name ON
            container (deleted, name);

        CREATE TRIGGER container_insert AFTER INSERT ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count + (1 - new.deleted),
                object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used,
                hash = chexor(hash, new.name,
                              new.put_timestamp || '-' ||
                                new.delete_timestamp || '-' ||
                                new.object_count || '-' || new.bytes_used);
        END;

        CREATE TRIGGER container_update BEFORE UPDATE ON container
        BEGIN
            SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
        END;

        CREATE TRIGGER container_delete AFTER DELETE ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count - (1 - old.deleted),
                object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used,
                hash = chexor(hash, old.name,
                              old.put_timestamp || '-' ||
                                old.delete_timestamp || '-' ||
                                old.object_count || '-' || old.bytes_used);
        END;
    """)
class TestAccountBrokerBeforeSPI(TestAccountBroker):
    """
    Tests for AccountBroker against databases created before
    the storage_policy_index column was added.
    """

    def setUp(self):
        # patch in the pre-storage-policy-index schema creators
        self._imported_create_container_table = \
            AccountBroker.create_container_table
        AccountBroker.create_container_table = \
            prespi_create_container_table
        self._imported_initialize = AccountBroker._initialize
        AccountBroker._initialize = prespi_AccountBroker_initialize
        # sanity check that a fresh DB has neither the SPI column nor the
        # policy_stat table
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        exc = None
        with broker.get() as conn:
            try:
                conn.execute('SELECT storage_policy_index FROM container')
            except BaseException as err:
                exc = err
        self.assertTrue('no such column: storage_policy_index' in str(exc))
        with broker.get() as conn:
            try:
                conn.execute('SELECT * FROM policy_stat')
            except sqlite3.OperationalError as err:
                self.assertTrue('no such table: policy_stat' in str(err))
            else:
                self.fail('database created with policy_stat table')

    def tearDown(self):
        # restore the real schema creators and verify a new DB gets the
        # storage_policy_index column
        AccountBroker.create_container_table = \
            self._imported_create_container_table
        AccountBroker._initialize = self._imported_initialize
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            conn.execute('SELECT storage_policy_index FROM container')

    @with_tempdir
    def test_policy_table_migration(self, tempdir):
        db_path = os.path.join(tempdir, 'account.db')

        # first init an acct DB without the policy_stat table present
        broker = AccountBroker(db_path, account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            try:
                conn.execute('''
                    SELECT * FROM policy_stat
                    ''').fetchone()[0]
            except sqlite3.OperationalError as err:
                # confirm that the table really isn't there
                self.assertTrue('no such table: policy_stat' in str(err))
            else:
                self.fail('broker did not raise sqlite3.OperationalError '
                          'trying to select from policy_stat table!')

        # make sure we can HEAD this thing w/o the table
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 0)

        # now do a PUT to create the table
        broker.put_container('o', Timestamp(time()).internal, 0, 0, 0,
                             POLICIES.default.idx)
        broker._commit_puts_stale_ok()

        # now confirm that the table was created
        with broker.get() as conn:
            conn.execute('SELECT * FROM policy_stat')
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 1)

    @patch_policies
    @with_tempdir
    def test_container_table_migration(self, tempdir):
        db_path = os.path.join(tempdir, 'account.db')

        # first init an acct DB without the policy_stat table present
        broker = AccountBroker(db_path, account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            try:
                conn.execute('''
                    SELECT storage_policy_index FROM container
                    ''').fetchone()[0]
            except sqlite3.OperationalError as err:
                # confirm that the table doesn't have this column
                self.assertTrue('no such column: storage_policy_index' in
                                str(err))
            else:
                self.fail('broker did not raise sqlite3.OperationalError '
                          'trying to select from storage_policy_index '
                          'from container table!')

        # manually insert an existing row to avoid migration
        with broker.get() as conn:
            conn.execute('''
                INSERT INTO container (name, put_timestamp,
                    delete_timestamp, object_count, bytes_used,
                    deleted)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', ('test_name', Timestamp(time()).internal, 0, 1, 2, 0))
            conn.commit()

        # make sure we can iter containers without the migration
        for c in broker.list_containers_iter(1, None, None, None, None):
            self.assertEqual(c, ('test_name', 1, 2, 0))

        # stats table is mysteriously empty...
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 0)

        # now do a PUT with a different value for storage_policy_index
        # which will update the DB schema as well as update policy_stats
        # for legacy containers in the DB (those without an SPI)
        other_policy = [p for p in POLICIES if p.idx != 0][0]
        broker.put_container('test_second', Timestamp(time()).internal,
                             0, 3, 4, other_policy.idx)
        broker._commit_puts_stale_ok()

        with broker.get() as conn:
            rows = conn.execute('''
                SELECT name, storage_policy_index FROM container
                ''').fetchall()
            for row in rows:
                if row[0] == 'test_name':
                    self.assertEqual(row[1], 0)
                else:
                    self.assertEqual(row[1], other_policy.idx)

        # we should have stats for both containers
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 2)
        if 'container_count' in stats[0]:
            self.assertEqual(stats[0]['container_count'], 1)
        self.assertEqual(stats[0]['object_count'], 1)
        self.assertEqual(stats[0]['bytes_used'], 2)
        if 'container_count' in stats[1]:
            self.assertEqual(stats[1]['container_count'], 1)
        self.assertEqual(stats[1]['object_count'], 3)
        self.assertEqual(stats[1]['bytes_used'], 4)

        # now lets delete a container and make sure policy_stats is OK
        with broker.get() as conn:
            conn.execute('''
                DELETE FROM container WHERE name = ?
            ''', ('test_name',))
            conn.commit()
        stats = broker.get_policy_stats()
        self.assertEqual(len(stats), 2)
        if 'container_count' in stats[0]:
            self.assertEqual(stats[0]['container_count'], 0)
        self.assertEqual(stats[0]['object_count'], 0)
        self.assertEqual(stats[0]['bytes_used'], 0)
        if 'container_count' in stats[1]:
            self.assertEqual(stats[1]['container_count'], 1)
        self.assertEqual(stats[1]['object_count'], 3)
        self.assertEqual(stats[1]['bytes_used'], 4)

    @with_tempdir
    def test_half_upgraded_database(self, tempdir):
        db_path = os.path.join(tempdir, 'account.db')
        # fix: removed a stray "ts = itertools.count()" that was
        # immediately overwritten by the real timestamp generator below
        ts = (Timestamp(t).internal for t in itertools.count(int(time())))

        broker = AccountBroker(db_path, account='a')
        broker.initialize(next(ts))

        self.assertTrue(broker.empty())

        # add a container (to pending file)
        broker.put_container('c', next(ts), 0, 0, 0,
                             POLICIES.default.idx)

        real_get = broker.get
        called = []

        @contextmanager
        def mock_get():
            # record the first executescript and raise on any subsequent
            # one, simulating a failure part-way through the upgrade
            with real_get() as conn:

                def mock_executescript(script):
                    if called:
                        raise Exception('kaboom!')
                    called.append(script)

                conn.executescript = mock_executescript
                yield conn

        broker.get = mock_get

        try:
            broker._commit_puts()
        except Exception:
            pass
        else:
            self.fail('mock exception was not raised')

        self.assertEqual(len(called), 1)
        self.assertTrue('CREATE TABLE policy_stat' in called[0])

        # nothing was committed
        broker = AccountBroker(db_path, account='a')
        with broker.get() as conn:
            try:
                conn.execute('SELECT * FROM policy_stat')
            except sqlite3.OperationalError as err:
                self.assertTrue('no such table: policy_stat' in str(err))
            else:
                self.fail('half upgraded database!')
            container_count = conn.execute(
                'SELECT count(*) FROM container').fetchone()[0]
            self.assertEqual(container_count, 0)

        # try again to commit puts
        self.assertFalse(broker.empty())

        # full migration successful
        with broker.get() as conn:
            conn.execute('SELECT * FROM policy_stat')
            conn.execute('SELECT storage_policy_index FROM container')

    @with_tempdir
    def test_pre_storage_policy_replication(self, tempdir):
        ts = make_timestamp_iter()

        # make two account database "replicas"
        old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'),
                                   account='a')
        old_broker.initialize(next(ts).internal)
        new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'),
                                   account='a')
        new_broker.initialize(next(ts).internal)

        # manually insert an existing row to avoid migration for old database
        with old_broker.get() as conn:
            conn.execute('''
                INSERT INTO container (name, put_timestamp,
                    delete_timestamp, object_count, bytes_used,
                    deleted)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', ('test_name', next(ts).internal, 0, 1, 2, 0))
            conn.commit()

        # get replication info and rows from old database
        info = old_broker.get_info()
        rows = old_broker.get_items_since(0, 10)

        # "send" replication rows to new database
        new_broker.merge_items(rows, info['id'])

        # make sure "test_name" container in new database
        self.assertEqual(new_broker.get_info()['container_count'], 1)
        for c in new_broker.list_containers_iter(1, None, None, None, None):
            self.assertEqual(c, ('test_name', 1, 2, 0))

        # full migration successful
        with new_broker.get() as conn:
            conn.execute('SELECT * FROM policy_stat')
            conn.execute('SELECT storage_policy_index FROM container')
def pre_track_containers_create_policy_stat(self, conn):
    """
    Copied from AccountBroker before the container_count column was
    added.
    Create policy_stat table which is specific to the account DB.
    Not a part of Pluggable Back-ends, internal to the baseline code.

    Intended to be monkeypatched in as
    AccountBroker.create_policy_stat_table.

    :param conn: DB connection object
    """
    # seed a policy-0 row from any pre-existing legacy stats
    conn.executescript("""
        CREATE TABLE policy_stat (
            storage_policy_index INTEGER PRIMARY KEY,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0
        );
        INSERT OR IGNORE INTO policy_stat (
            storage_policy_index, object_count, bytes_used
        )
        SELECT 0, object_count, bytes_used
        FROM account_stat
        WHERE container_count > 0;
    """)
def pre_track_containers_create_container_table(self, conn):
    """
    Copied from AccountBroker before the container_count column was
    added (using old stat trigger script)
    Create container table which is specific to the account DB.

    Intended to be monkeypatched in as AccountBroker.create_container_table.

    :param conn: DB connection object
    """
    # revert to old trigger script to support one of the tests
    OLD_POLICY_STAT_TRIGGER_SCRIPT = """
        CREATE TRIGGER container_insert_ps AFTER INSERT ON container
        BEGIN
            INSERT OR IGNORE INTO policy_stat
                (storage_policy_index, object_count, bytes_used)
                VALUES (new.storage_policy_index, 0, 0);
            UPDATE policy_stat
            SET object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used
            WHERE storage_policy_index = new.storage_policy_index;
        END;
        CREATE TRIGGER container_delete_ps AFTER DELETE ON container
        BEGIN
            UPDATE policy_stat
            SET object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used
            WHERE storage_policy_index = old.storage_policy_index;
        END;
    """
    conn.executescript("""
        CREATE TABLE container (
            ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT,
            put_timestamp TEXT,
            delete_timestamp TEXT,
            object_count INTEGER,
            bytes_used INTEGER,
            deleted INTEGER DEFAULT 0,
            storage_policy_index INTEGER DEFAULT 0
        );

        CREATE INDEX ix_container_deleted_name ON
            container (deleted, name);

        CREATE TRIGGER container_insert AFTER INSERT ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count + (1 - new.deleted),
                object_count = object_count + new.object_count,
                bytes_used = bytes_used + new.bytes_used,
                hash = chexor(hash, new.name,
                              new.put_timestamp || '-' ||
                                new.delete_timestamp || '-' ||
                                new.object_count || '-' || new.bytes_used);
        END;

        CREATE TRIGGER container_update BEFORE UPDATE ON container
        BEGIN
            SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
        END;

        CREATE TRIGGER container_delete AFTER DELETE ON container
        BEGIN
            UPDATE account_stat
            SET container_count = container_count - (1 - old.deleted),
                object_count = object_count - old.object_count,
                bytes_used = bytes_used - old.bytes_used,
                hash = chexor(hash, old.name,
                              old.put_timestamp || '-' ||
                                old.delete_timestamp || '-' ||
                                old.object_count || '-' || old.bytes_used);
        END;
    """ + OLD_POLICY_STAT_TRIGGER_SCRIPT)
class AccountBrokerPreTrackContainerCountSetup(object):
    """
    Mixin that monkeypatches AccountBroker to create the schema that
    predates the policy_stat.container_count column, so subclasses can
    exercise the migration path.
    """

    def assertUnmigrated(self, broker):
        # the pre-migration policy_stat table has no container_count column
        with broker.get() as conn:
            try:
                conn.execute('''
                    SELECT container_count FROM policy_stat
                    ''').fetchone()[0]
            except sqlite3.OperationalError as err:
                # confirm that the column really isn't there
                self.assertTrue('no such column: container_count' in str(err))
            else:
                self.fail('broker did not raise sqlite3.OperationalError '
                          'trying to select container_count from policy_stat!')

    def setUp(self):
        # use old version of policy_stat
        self._imported_create_policy_stat_table = \
            AccountBroker.create_policy_stat_table
        AccountBroker.create_policy_stat_table = \
            pre_track_containers_create_policy_stat

        # use old container table so we use old trigger for
        # updating policy_stat
        self._imported_create_container_table = \
            AccountBroker.create_container_table
        AccountBroker.create_container_table = \
            pre_track_containers_create_container_table

        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        self.assertUnmigrated(broker)

        self.tempdir = mkdtemp()
        self.ts = (Timestamp(t).internal for t in itertools.count(int(time())))

        self.db_path = os.path.join(self.tempdir, 'sda', 'accounts',
                                    '0', '0', '0', 'test.db')
        self.broker = AccountBroker(self.db_path, account='a')
        self.broker.initialize(next(self.ts))

        # Common sanity-check that our starting, pre-migration state correctly
        # does not have the container_count column.
        self.assertUnmigrated(self.broker)

    def tearDown(self):
        rmtree(self.tempdir, ignore_errors=True)
        self.restore_account_broker()

        # with the real schema creators restored, a fresh DB must have
        # the container_count column
        broker = AccountBroker(':memory:', account='a')
        broker.initialize(Timestamp('1').internal)
        with broker.get() as conn:
            conn.execute('SELECT container_count FROM policy_stat')

    def restore_account_broker(self):
        AccountBroker.create_policy_stat_table = \
            self._imported_create_policy_stat_table
        AccountBroker.create_container_table = \
            self._imported_create_container_table
@patch_policies([StoragePolicy(0, 'zero', False),
                 StoragePolicy(1, 'one', True),
                 StoragePolicy(2, 'two', False),
                 StoragePolicy(3, 'three', False)])
class TestAccountBrokerBeforePerPolicyContainerTrack(
        AccountBrokerPreTrackContainerCountSetup, TestAccountBroker):
    """
    Tests for AccountBroker against databases created before
    the container_count column was added to the policy_stat table.
    """

    def test_policy_table_cont_count_do_migrations(self):
        # add a few containers
        num_containers = 8
        policies = itertools.cycle(POLICIES)
        per_policy_container_counts = defaultdict(int)

        # add a few container entries
        for i in range(num_containers):
            name = 'test-container-%02d' % i
            policy = next(policies)
            self.broker.put_container(name, next(self.ts),
                                      0, 0, 0, int(policy))
            per_policy_container_counts[int(policy)] += 1

        total_container_count = self.broker.get_info()['container_count']
        self.assertEqual(total_container_count, num_containers)

        # still un-migrated
        self.assertUnmigrated(self.broker)

        policy_stats = self.broker.get_policy_stats()
        self.assertEqual(len(policy_stats), len(per_policy_container_counts))
        for stats in policy_stats.values():
            self.assertEqual(stats['object_count'], 0)
            self.assertEqual(stats['bytes_used'], 0)
            # un-migrated dbs should not return container_count
            self.assertFalse('container_count' in stats)

        # now force the migration
        policy_stats = self.broker.get_policy_stats(do_migrations=True)
        self.assertEqual(len(policy_stats), len(per_policy_container_counts))
        for policy_index, stats in policy_stats.items():
            self.assertEqual(stats['object_count'], 0)
            self.assertEqual(stats['bytes_used'], 0)
            self.assertEqual(stats['container_count'],
                             per_policy_container_counts[policy_index])

    def test_policy_table_cont_count_update_get_stats(self):
        # add a few container entries
        # (fix: dropped an unused "container_name" assignment that shadowed
        # the later, genuinely-used one in the delete loop below)
        for policy in POLICIES:
            for i in range(0, policy.idx + 1):
                self.broker.put_container('c%s_%s' % (policy.idx, i),
                                          0, 0, 0, 0, policy.idx)
        # _commit_puts_stale_ok() called by get_policy_stats()

        # calling get_policy_stats() with do_migrations will alter the table
        # and populate it based on what's in the container table now
        stats = self.broker.get_policy_stats(do_migrations=True)

        # now confirm that the column was created
        with self.broker.get() as conn:
            conn.execute('SELECT container_count FROM policy_stat')

        # confirm stats reporting back correctly
        self.assertEqual(len(stats), 4)
        for policy in POLICIES:
            self.assertEqual(stats[policy.idx]['container_count'],
                             policy.idx + 1)

        # now delete one from each policy and check the stats
        with self.broker.get() as conn:
            for policy in POLICIES:
                container_name = 'c%s_0' % policy.idx
                conn.execute('''
                    DELETE FROM container
                    WHERE name = ?
                    ''', (container_name,))
            conn.commit()
        stats = self.broker.get_policy_stats()
        self.assertEqual(len(stats), 4)
        for policy in POLICIES:
            self.assertEqual(stats[policy.idx]['container_count'],
                             policy.idx)

        # now put them back and make sure things are still cool
        for policy in POLICIES:
            container_name = 'c%s_0' % policy.idx
            self.broker.put_container(container_name, 0, 0, 0, 0, policy.idx)
        # _commit_puts_stale_ok() called by get_policy_stats()

        # confirm stats reporting back correctly
        stats = self.broker.get_policy_stats()
        self.assertEqual(len(stats), 4)
        for policy in POLICIES:
            self.assertEqual(stats[policy.idx]['container_count'],
                             policy.idx + 1)

    def test_per_policy_cont_count_migration_with_deleted(self):
        num_containers = 15
        policies = itertools.cycle(POLICIES)
        container_policy_map = {}

        # add a few container entries
        for i in range(num_containers):
            name = 'test-container-%02d' % i
            policy = next(policies)
            self.broker.put_container(name, next(self.ts),
                                      0, 0, 0, int(policy))
            # keep track of stub container policies
            container_policy_map[name] = policy

        # delete about half of the containers
        for i in range(0, num_containers, 2):
            name = 'test-container-%02d' % i
            policy = container_policy_map[name]
            self.broker.put_container(name, 0, next(self.ts),
                                      0, 0, int(policy))

        total_container_count = self.broker.get_info()['container_count']
        # fix: use floor division so the expected count stays an int under
        # py3 (15 / 2 would be 7.5 and the assertion would fail)
        self.assertEqual(total_container_count, num_containers // 2)

        # trigger migration
        policy_info = self.broker.get_policy_stats(do_migrations=True)
        self.assertEqual(len(policy_info), min(num_containers, len(POLICIES)))
        policy_container_count = sum(p['container_count'] for p in
                                     policy_info.values())
        self.assertEqual(total_container_count, policy_container_count)

    def test_per_policy_cont_count_migration_with_single_policy(self):
        num_containers = 100
        with patch_policies(legacy_only=True):
            policy = POLICIES[0]
            # add a few container entries
            for i in range(num_containers):
                name = 'test-container-%02d' % i
                self.broker.put_container(name, next(self.ts),
                                          0, 0, 0, int(policy))

            # delete about half of the containers
            for i in range(0, num_containers, 2):
                name = 'test-container-%02d' % i
                self.broker.put_container(name, 0, next(self.ts),
                                          0, 0, int(policy))

            total_container_count = self.broker.get_info()['container_count']

            # trigger migration
            policy_info = self.broker.get_policy_stats(do_migrations=True)
            # fix: floor division keeps the expected count an int under py3
            self.assertEqual(total_container_count, num_containers // 2)
            self.assertEqual(len(policy_info), 1)
            policy_container_count = sum(p['container_count'] for p in
                                         policy_info.values())
            self.assertEqual(total_container_count, policy_container_count)

    def test_per_policy_cont_count_migration_impossible(self):
        with patch_policies(legacy_only=True):
            # add a container for the legacy policy
            policy = POLICIES[0]
            self.broker.put_container('test-legacy-container', next(self.ts),
                                      0, 0, 0, int(policy))

            # now create an impossible situation by adding a container for a
            # policy index that doesn't exist
            non_existent_policy_index = int(policy) + 1
            self.broker.put_container('test-non-existent-policy',
                                      next(self.ts), 0, 0, 0,
                                      non_existent_policy_index)

            total_container_count = self.broker.get_info()['container_count']

            # trigger migration
            policy_info = self.broker.get_policy_stats(do_migrations=True)

            self.assertEqual(total_container_count, 2)
            self.assertEqual(len(policy_info), 2)

            for policy_stat in policy_info.values():
                self.assertEqual(policy_stat['container_count'], 1)
| apache-2.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/lldb/lldb_webkit.py | 61 | 11453 | # Copyright (C) 2012 Apple. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
LLDB Support for WebKit Types
Add the following to your .lldbinit file to add WebKit Type summaries in LLDB and Xcode:
command script import {Path to WebKit Root}/Tools/lldb/lldb_webkit.py
"""
import lldb
def __lldb_init_module(debugger, dict):
    """Install the WebKit type summaries and synthetic child providers."""
    registration_commands = (
        'type summary add --expand -F lldb_webkit.WTFString_SummaryProvider WTF::String',
        'type summary add --expand -F lldb_webkit.WTFStringImpl_SummaryProvider WTF::StringImpl',
        'type summary add --expand -F lldb_webkit.WTFAtomicString_SummaryProvider WTF::AtomicString',
        'type summary add --expand -F lldb_webkit.WTFVector_SummaryProvider -x "WTF::Vector<.+>$"',
        'type summary add --expand -F lldb_webkit.WTFHashTable_SummaryProvider -x "WTF::HashTable<.+>$"',
        'type synthetic add -x "WTF::Vector<.+>$" --python-class lldb_webkit.WTFVectorProvider',
        'type synthetic add -x "WTF::HashTable<.+>$" --python-class lldb_webkit.WTFHashTableProvider',
        'type summary add -F lldb_webkit.WebCoreLayoutUnit_SummaryProvider WebCore::LayoutUnit',
        'type summary add -F lldb_webkit.WebCoreLayoutSize_SummaryProvider WebCore::LayoutSize',
        'type summary add -F lldb_webkit.WebCoreLayoutPoint_SummaryProvider WebCore::LayoutPoint',
    )
    for command in registration_commands:
        debugger.HandleCommand(command)
def WTFString_SummaryProvider(valobj, dict):
    """Summarize a WTF::String as its length and decoded contents."""
    string = WTFStringProvider(valobj, dict)
    length = string.get_length()
    contents = string.to_string()
    return "{ length = %d, contents = '%s' }" % (length, contents)
def WTFStringImpl_SummaryProvider(valobj, dict):
    """Summarize a WTF::StringImpl, noting its 8-bit/16-bit flavor."""
    impl = WTFStringImplProvider(valobj, dict)
    return "{ length = %d, is8bit = %d, contents = '%s' }" % (
        impl.get_length(), impl.is_8bit(), impl.to_string())
def WTFAtomicString_SummaryProvider(valobj, dict):
    """Summarize a WTF::AtomicString via its underlying m_string member."""
    underlying_string = valobj.GetChildMemberWithName('m_string')
    return WTFString_SummaryProvider(underlying_string, dict)
def WTFVector_SummaryProvider(valobj, dict):
    """Summarize a WTF::Vector as its size and capacity."""
    vector = WTFVectorProvider(valobj, dict)
    return "{ size = %d, capacity = %d }" % (vector.size, vector.capacity)
def WTFHashTable_SummaryProvider(valobj, dict):
    """Summarize a WTF::HashTable as its bucket count and key count."""
    table = WTFHashTableProvider(valobj, dict)
    return "{ tableSize = %d, keyCount = %d }" % (
        table.tableSize(), table.keyCount())
def WebCoreLayoutUnit_SummaryProvider(valobj, dict):
    """Summarize a WebCore::LayoutUnit in CSS pixels."""
    unit = WebCoreLayoutUnitProvider(valobj, dict)
    return "{ %s }" % unit.to_string()
def WebCoreLayoutSize_SummaryProvider(valobj, dict):
    """Summarize a WebCore::LayoutSize as its width and height."""
    size = WebCoreLayoutSizeProvider(valobj, dict)
    return "{ width = %s, height = %s }" % (size.get_width(), size.get_height())
def WebCoreLayoutPoint_SummaryProvider(valobj, dict):
    """Summarize a WebCore::LayoutPoint as its x and y coordinates."""
    point = WebCoreLayoutPointProvider(valobj, dict)
    return "{ x = %s, y = %s }" % (point.get_x(), point.get_y())
# FIXME: Provide support for the following types:
# def WTFCString_SummaryProvider(valobj, dict):
# def WebCoreKURLGooglePrivate_SummaryProvider(valobj, dict):
# def WebCoreQualifiedName_SummaryProvider(valobj, dict):
# def JSCIdentifier_SummaryProvider(valobj, dict):
# def JSCJSString_SummaryProvider(valobj, dict):
def guess_string_length(valobj, error):
    """Guess the length of a NUL-terminated 16-bit string.

    Scans up to 2048 code units for a NUL terminator and returns the index
    of the first one found; returns 0 for a null pointer.

    NOTE(review): when no terminator is found within 2048 units the
    fallback is 256, not 2048 -- presumably a deliberate display
    truncation, but the mismatch looks suspicious; confirm upstream.
    (Python 2 only: uses xrange.)
    """
    if not valobj.GetValue():
        return 0
    for i in xrange(0, 2048):
        if valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0) == 0:
            return i
    return 256
def ustring_to_string(valobj, error, length=None):
    """Read *length* 16-bit code units from *valobj* and return UTF-8 bytes.

    If *length* is None the string is assumed NUL-terminated and its
    length is guessed.  (Python 2 only: uses xrange/unichr.)
    """
    if length is None:
        length = guess_string_length(valobj, error)
    else:
        length = int(length)
    out_string = u""
    for i in xrange(0, length):
        char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt16(error, 0)
        out_string = out_string + unichr(char_value)
    return out_string.encode('utf-8')
def lstring_to_string(valobj, error, length=None):
    """Read *length* 8-bit code units from *valobj* and return UTF-8 bytes.

    The 8-bit ("Latin-1") variant of ustring_to_string.  If *length* is
    None the string is assumed NUL-terminated and its length is guessed.
    (Python 2 only: uses xrange/unichr.)
    """
    if length is None:
        length = guess_string_length(valobj, error)
    else:
        length = int(length)
    out_string = u""
    for i in xrange(0, length):
        char_value = valobj.GetPointeeData(i, 1).GetUnsignedInt8(error, 0)
        out_string = out_string + unichr(char_value)
    return out_string.encode('utf-8')
class WTFStringImplProvider:
    """Reads WTF::StringImpl fields out of an lldb SBValue."""

    def __init__(self, valobj, dict):
        self.valobj = valobj

    def get_length(self):
        # m_length counts code units, not bytes
        return self.valobj.GetChildMemberWithName('m_length').GetValueAsUnsigned(0)

    def get_data8(self):
        # the data pointers live in the impl's third child (anonymous union)
        return self.valobj.GetChildAtIndex(2).GetChildMemberWithName('m_data8')

    def get_data16(self):
        return self.valobj.GetChildAtIndex(2).GetChildMemberWithName('m_data16')

    def to_string(self):
        error = lldb.SBError()
        if self.is_8bit():
            return lstring_to_string(self.get_data8(), error, self.get_length())
        return ustring_to_string(self.get_data16(), error, self.get_length())

    def is_8bit(self):
        # FIXME: find a way to access WTF::StringImpl::s_hashFlag8BitBuffer
        # bit 6 of m_hashAndFlags mirrors that flag
        return bool(self.valobj.GetChildMemberWithName('m_hashAndFlags').GetValueAsUnsigned(0) \
            & 1 << 6)
class WTFStringProvider:
    """Reads a WTF::String via its underlying StringImpl.

    A WTF::String may be null (m_impl.m_ptr == nullptr); in that case
    get_length() reports 0 and to_string() an empty string.
    """

    def __init__(self, valobj, dict):
        self.valobj = valobj

    def stringimpl(self):
        """Return a WTFStringImplProvider, or None for a null string."""
        impl_ptr = self.valobj.GetChildMemberWithName('m_impl').GetChildMemberWithName('m_ptr')
        # Bug fix: the previous code wrapped even a null pointer in a
        # provider object, and such an object is always truthy, so the
        # 'if not impl' null checks below could never fire.  Return None
        # for a null pointer so the checks are meaningful.
        if not impl_ptr.GetValueAsUnsigned(0):
            return None
        return WTFStringImplProvider(impl_ptr, dict)

    def get_length(self):
        impl = self.stringimpl()
        if not impl:
            return 0
        return impl.get_length()

    def to_string(self):
        impl = self.stringimpl()
        if not impl:
            return u""
        return impl.to_string()
class WebCoreLayoutUnitProvider:
    """Print a WebCore::LayoutUnit.

    LayoutUnit stores a signed fixed-point value in 1/64ths of a pixel.
    """

    def __init__(self, valobj, dict):
        self.valobj = valobj

    def to_string(self):
        # Bug fix: read m_value with GetValueAsSigned -- the previous
        # GetValueAsUnsigned rendered negative layout units (which are
        # legitimate layout coordinates) as huge positive pixel counts.
        return "%gpx" % (self.valobj.GetChildMemberWithName('m_value').GetValueAsSigned(0) / 64.0)
class WebCoreLayoutSizeProvider:
    """Print a WebCore::LayoutSize as a pair of LayoutUnit strings."""

    def __init__(self, valobj, dict):
        self.valobj = valobj

    def get_width(self):
        width = self.valobj.GetChildMemberWithName('m_width')
        return WebCoreLayoutUnitProvider(width, dict).to_string()

    def get_height(self):
        height = self.valobj.GetChildMemberWithName('m_height')
        return WebCoreLayoutUnitProvider(height, dict).to_string()
class WebCoreLayoutPointProvider:
    """Print a WebCore::LayoutPoint as a pair of LayoutUnit strings."""

    def __init__(self, valobj, dict):
        self.valobj = valobj

    def get_x(self):
        x_coordinate = self.valobj.GetChildMemberWithName('m_x')
        return WebCoreLayoutUnitProvider(x_coordinate, dict).to_string()

    def get_y(self):
        y_coordinate = self.valobj.GetChildMemberWithName('m_y')
        return WebCoreLayoutUnitProvider(y_coordinate, dict).to_string()
class WTFVectorProvider:
    """LLDB synthetic-children provider for WTF::Vector.

    Presents the elements as children [0..size), followed by the raw
    m_size, m_capacity and m_buffer members at the end of the child list.
    """

    def __init__(self, valobj, internal_dict):
        self.valobj = valobj
        self.update()

    def num_children(self):
        # One child per element plus the three bookkeeping members.
        return self.size + 3

    def get_child_index(self, name):
        if name == "m_size":
            return self.size
        elif name == "m_capacity":
            return self.size + 1
        elif name == "m_buffer":
            return self.size + 2
        else:
            # Element children are named like '[3]'; strip the brackets.
            return int(name.lstrip('[').rstrip(']'))

    def get_child_at_index(self, index):
        if index == self.size:
            return self.valobj.GetChildMemberWithName("m_size")
        elif index == self.size + 1:
            return self.valobj.GetChildMemberWithName("m_capacity")
        elif index == self.size + 2:
            return self.buffer
        elif index < self.size:
            # Element i lives i * sizeof(element) bytes past the buffer start.
            offset = index * self.data_size
            child = self.buffer.CreateChildAtOffset('[' + str(index) + ']', offset, self.data_type)
            return child
        else:
            return None

    def update(self):
        self.buffer = self.valobj.GetChildMemberWithName('m_buffer')
        self.size = self.valobj.GetChildMemberWithName('m_size').GetValueAsUnsigned(0)
        # NOTE(review): m_capacity is read from the buffer child rather than
        # from the vector object itself -- confirm against WTF::Vector layout.
        self.capacity = self.buffer.GetChildMemberWithName('m_capacity').GetValueAsUnsigned(0)
        # The element type is the pointee type of the m_buffer pointer.
        self.data_type = self.buffer.GetType().GetPointeeType()
        self.data_size = self.data_type.GetByteSize()

    def has_children(self):
        return True
class WTFHashTableProvider:
    """LLDB synthetic-children provider for WTF::HashTable.

    Exposes the table buckets as indexed children, followed by the raw
    m_table / m_tableSize / m_tableSizeMask / m_keyCount / m_deletedCount
    members at the end of the child list.
    """

    def __init__(self, valobj, internal_dict):
        self.valobj = valobj
        self.update()

    def num_children(self):
        # One child per bucket plus the five bookkeeping members.
        return self.tableSize() + 5

    def get_child_index(self, name):
        if name == "m_table":
            return self.tableSize()
        elif name == "m_tableSize":
            return self.tableSize() + 1
        elif name == "m_tableSizeMask":
            return self.tableSize() + 2
        elif name == "m_keyCount":
            return self.tableSize() + 3
        elif name == "m_deletedCount":
            return self.tableSize() + 4
        else:
            # Bucket children are named like '[7]'; strip the brackets.
            return int(name.lstrip('[').rstrip(']'))

    def get_child_at_index(self, index):
        if index == self.tableSize():
            return self.valobj.GetChildMemberWithName('m_table')
        elif index == self.tableSize() + 1:
            return self.valobj.GetChildMemberWithName('m_tableSize')
        elif index == self.tableSize() + 2:
            return self.valobj.GetChildMemberWithName('m_tableSizeMask')
        elif index == self.tableSize() + 3:
            return self.valobj.GetChildMemberWithName('m_keyCount')
        elif index == self.tableSize() + 4:
            return self.valobj.GetChildMemberWithName('m_deletedCount')
        elif index < self.tableSize():
            # Materialize bucket [index] from the m_table pointer.
            table = self.valobj.GetChildMemberWithName('m_table')
            return table.CreateChildAtOffset('[' + str(index) + ']', index * self.data_size, self.data_type)
        else:
            return None

    def tableSize(self):
        return self.valobj.GetChildMemberWithName('m_tableSize').GetValueAsUnsigned(0)

    def keyCount(self):
        return self.valobj.GetChildMemberWithName('m_keyCount').GetValueAsUnsigned(0)

    def update(self):
        # The bucket type is the HashTable's first template argument.
        self.data_type = self.valobj.GetType().GetTemplateArgumentType(0)
        self.data_size = self.data_type.GetByteSize()

    def has_children(self):
        return True
| gpl-3.0 |
kapilt/ansible | lib/ansible/plugins/action/async.py | 3 | 3337 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import random
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    # Action plugin for async task execution: uploads the target module plus
    # the async_wrapper helper to the remote host, then launches the wrapper
    # so the module runs in the background.

    def run(self, tmp=None, task_vars=None):
        ''' transfer the given module name, plus the async module, then run it '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        # Async execution cannot be simulated, so bail out in check mode.
        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not supported for this module'
            return result

        if not tmp:
            tmp = self._make_tmp_path()

        module_name = self._task.action
        # Remote paths for the wrapper and the actual module inside tmp.
        async_module_path = self._connection._shell.join_path(tmp, 'async_wrapper')
        remote_module_path = self._connection._shell.join_path(tmp, module_name)

        env_string = self._compute_environment_string()

        module_args = self._task.args.copy()
        if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
            module_args['_ansible_no_log'] = True

        # configure, upload, and chmod the target module
        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        self._transfer_data(remote_module_path, module_data)
        self._remote_chmod('a+rx', remote_module_path)

        # configure, upload, and chmod the async_wrapper module
        (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
        self._transfer_data(async_module_path, async_module_data)
        self._remote_chmod('a+rx', async_module_path)

        # Module arguments travel as a JSON file rather than on the cmdline.
        argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(module_args))

        # NOTE(review): 'async' is a reserved keyword from Python 3.7 on, so
        # this attribute access only parses under Python 2 / <= 3.6.
        async_limit = self._task.async
        async_jid = str(random.randint(0, 999999999999))

        # wrapper invocation: <env> async_wrapper <jid> <time_limit> <module> <argsfile>
        async_cmd = " ".join([str(x) for x in [env_string, async_module_path, async_jid, async_limit, remote_module_path, argsfile]])
        result.update(self._low_level_execute_command(cmd=async_cmd))

        # clean up after
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
            self._remove_tmp_path(tmp)

        result['changed'] = True

        # be sure to strip out the BECOME-SUCCESS message, which may
        # be there depending on the output of the module
        result['stdout'] = self._strip_success_message(result.get('stdout', ''))

        return result
| gpl-3.0 |
allancarlos123/Solfege | interface/main-menu/main-menu.py | 2 | 2314 | #!/usr/bin/env python
#coding: utf-8
from gi.repository import Gtk, Gdk
class Programa(Gtk.Window):
    """Main menu window: lets the user pick a solfege exercise and shows
    the matching description text loaded from desc_exercicios/."""

    def eventCompInte(self, widget):
        """Click handler for the 'interval comparison' button.

        Loads the exercise description and displays it in the description
        label.  The file is opened with a context manager so the handle is
        closed promptly (the original leaked it until GC).
        """
        with open("desc_exercicios/ComparacaoDeIntervalos", "r") as arquivo:
            self.texto = arquivo.read()
        # Update the description label with the loaded text.
        self.descExerc.set_text(self.texto)

    #def eventIdentInte(self, widget):
    #    arquivo = open("desc_exercicios/IdentificacaoDeIntervalos.txt", "r")
    #    self.texto = arquivo.read()
    #    self.descExerc.set_text(self.texto)

    #def eventEntoInte(self, widget):
    #    arquivo = open("desc_exercicios/EntonacaoDeIntervalos.txt", "r")
    #    self.texto = arquivo.read()
    #    self.descExerc.set_text(self.texto)

    def __init__(self):
        Gtk.Window.__init__(self, title="Menu Principal - Escolha o Exercício")

        # Load the UI layout built with Glade.
        builder = Gtk.Builder()
        builder.add_from_file("layout.glade")
        window = builder.get_object("window1")
        window.maximize()

        # TODO: fix the "Start" button, which is being swallowed by the
        # initial bar, for:
        #   - Intervals
        #   - Rhythm

        # Fetch the description label from the XML and set its justification.
        self.descExerc = builder.get_object("descExerc")
        self.descExerc.set_justify(2)

        # Fetch the button from the XML and hook up its click handler.
        btnCompInte = builder.get_object("btnCompInte")
        btnCompInte.connect("clicked", self.eventCompInte)

        #btnIdentInt = builder.get_object("btnIdentInt")
        #btnIdentInt.connect("clicked", self.eventIdentInt)

        #btnEntoInt = builder.get_object("btnEntoInt")
        #btnEntoInt.connect("clicked", self.eventEntoInt)

        # Quit the main loop when the window is closed.
        window.connect("delete-event", Gtk.main_quit)
        window.show_all()
# Module entry: install the application stylesheet, build the main window and
# start the GTK main loop.
style_provider = Gtk.CssProvider()

# Read the stylesheet and close the file promptly (the original left the
# handle open until interpreter exit).
with open('style.css', 'rb') as css:
    css_data = css.read()

style_provider.load_from_data(css_data)

Gtk.StyleContext.add_provider_for_screen(
    Gdk.Screen.get_default(), style_provider,
    Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)

win = Programa()
#win.show_all()
Gtk.main()
| gpl-3.0 |
heke123/crosswalk | build/android/generate_xwalk_core_library.py | 3 | 12509 | #!/usr/bin/env python
#
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import distutils.dir_util
import optparse
import os
import shutil
import sys
import zipfile
from common_function import RemoveUnusedFilesInReleaseMode
from xml.dom.minidom import Document
XWALK_CORE_SHELL_APK = 'xwalk_core_shell_apk'
def AddGeneratorOptions(option_parser):
    """Register the command-line options understood by this generator."""
    option_parser.add_option(
        '-s', dest='source', type='string',
        help='Source directory of project root.')
    option_parser.add_option(
        '-t', dest='target', type='string',
        help='Product out target directory.')
    option_parser.add_option(
        '--shared', action='store_true', default=False,
        help='Generate shared library')
    option_parser.add_option(
        '--src-package', action='store_true', default=False,
        help='Use java sources instead of java libs.')
    option_parser.add_option(
        '--use-lzma', action='store_true', default=False,
        help='Use LZMA compress native library when specified')
def CleanLibraryProject(out_project_dir):
    """Delete every entry inside out_project_dir, keeping the directory itself.

    A missing directory is a no-op.
    """
    if not os.path.exists(out_project_dir):
        return
    for entry in os.listdir(out_project_dir):
        entry_path = os.path.join(out_project_dir, entry)
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
        elif os.path.isfile(entry_path):
            os.remove(entry_path)
def CopyProjectFiles(project_source, out_project_dir, shared):
    """Copy the Ant/Eclipse template files into the output library project.

    Chooses the shared-library or core-library template directory based on
    the 'shared' flag.
    """
    print('Copying library project files...')
    if shared :
        template_dir = os.path.join(project_source, 'xwalk', 'build', 'android',
                                    'xwalk_shared_library_template')
    else :
        template_dir = os.path.join(project_source, 'xwalk', 'build', 'android',
                                    'xwalkcore_library_template')
    files_to_copy = [
        # AndroidManifest.xml from template.
        'AndroidManifest.xml',
        # Eclipse project properties from template.
        'project.properties',
        # Ant build file.
        'build.xml',
        # Ant properties file.
        'ant.properties',
    ]
    for f in files_to_copy:
        source_file = os.path.join(template_dir, f)
        target_file = os.path.join(out_project_dir, f)
        # copy2 preserves file metadata (mtime, permission bits).
        shutil.copy2(source_file, target_file)
def CopyJSBindingFiles(project_source, out_project_dir):
    """Copy the extension JS API files into res/raw of the output project."""
    print 'Copying js binding files...'
    jsapi_dir = os.path.join(out_project_dir, 'res', 'raw')
    if not os.path.exists(jsapi_dir):
        os.makedirs(jsapi_dir)
    # Source-tree paths (relative to project_source) of the JS API bindings.
    jsfiles_to_copy = [
        'xwalk/experimental/launch_screen/launch_screen_api.js',
        'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
        + 'internal/extension/api/contacts/contacts_api.js',
        'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
        + 'internal/extension/api/device_capabilities/device_capabilities_api.js',
        'xwalk/runtime/android/core_internal/src/org/xwalk/core/'
        + 'internal/extension/api/messaging/messaging_api.js',
        'xwalk/experimental/wifidirect/wifidirect_api.js'
    ]
    # Copy JS binding file to assets/jsapi folder.
    for jsfile in jsfiles_to_copy:
        source_file = os.path.join(project_source, jsfile)
        target_file = os.path.join(jsapi_dir, os.path.basename(source_file))
        shutil.copyfile(source_file, target_file)
def CopyBinaries(out_dir, out_project_dir, src_package, shared):
    """Copy jars, pak resources and native libraries into the library project."""
    # Copy jar files to libs.
    libs_dir = os.path.join(out_project_dir, 'libs')
    if not os.path.exists(libs_dir):
        os.mkdir(libs_dir)
    # The jar set depends on the flavor being produced.
    if shared:
        libs_to_copy = ['xwalk_core_library_java_app_part.jar']
    elif src_package:
        libs_to_copy = ['jsr_305_javalib.jar', ]
    else:
        libs_to_copy = ['xwalk_core_library_java.jar', ]
    for lib in libs_to_copy:
        source_file = os.path.join(out_dir, 'lib.java', lib)
        target_file = os.path.join(libs_dir, lib)
        shutil.copyfile(source_file, target_file)
    # Shared builds only need the app-part jar; everything below applies to
    # the bundled (core) library only.
    if shared:
        return
    print 'Copying binaries...'
    # Copy assets.
    res_raw_dir = os.path.join(out_project_dir, 'res', 'raw')
    res_value_dir = os.path.join(out_project_dir, 'res', 'values')
    if not os.path.exists(res_raw_dir):
        os.mkdir(res_raw_dir)
    if not os.path.exists(res_value_dir):
        os.mkdir(res_value_dir)
    paks_to_copy = [
        'icudtl.dat',
        # Please refer to XWALK-3516, disable v8 use external startup data,
        # reopen it if needed later.
        # 'natives_blob.bin',
        # 'snapshot_blob.bin',
        'xwalk.pak',
    ]
    # Build res/values/xwalk_resources_list.xml enumerating the copied paks
    # so the runtime can locate them.
    pak_list_xml = Document()
    resources_node = pak_list_xml.createElement('resources')
    string_array_node = pak_list_xml.createElement('string-array')
    string_array_node.setAttribute('name', 'xwalk_resources_list')
    pak_list_xml.appendChild(resources_node)
    resources_node.appendChild(string_array_node)
    for pak in paks_to_copy:
        source_file = os.path.join(out_dir, pak)
        target_file = os.path.join(res_raw_dir, pak)
        shutil.copyfile(source_file, target_file)
        item_node = pak_list_xml.createElement('item')
        item_node.appendChild(pak_list_xml.createTextNode(pak))
        string_array_node.appendChild(item_node)
    pak_list_file = open(os.path.join(res_value_dir,
                                      'xwalk_resources_list.xml'), 'w')
    pak_list_xml.writexml(pak_list_file, newl='\n', encoding='utf-8')
    pak_list_file.close()
    # Copy native libraries.
    source_dir = os.path.join(out_dir, XWALK_CORE_SHELL_APK, 'libs')
    distutils.dir_util.copy_tree(source_dir, libs_dir)
def CopyDirAndPrefixDuplicates(input_dir, output_dir, prefix, blacklist=None):
    """Merge the files under input_dir into output_dir.

    A file whose relative path does not yet exist in output_dir is copied
    as-is; a file that would collide is copied under a new name formed by
    prepending ``prefix + '_'`` to its base name, so both versions end up in
    the merged tree.  File names listed in ``blacklist`` are skipped.
    """
    skipped = blacklist or []
    for dirpath, _, filenames in os.walk(input_dir):
        for name in filenames:
            if name in skipped:
                continue
            source = os.path.join(dirpath, name)
            destination = os.path.join(output_dir,
                                       os.path.relpath(source, input_dir))
            destination_dir = os.path.dirname(destination)
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            # Name clash with an earlier copy: keep both by prefixing the
            # new file with the caller-supplied prefix.
            if os.path.exists(destination):
                destination = os.path.join(
                    destination_dir, prefix + '_' + os.path.basename(destination))
            shutil.copyfile(source, destination)
def MoveImagesToNonMdpiFolders(res_root):
    """Move images from drawable-*-mdpi-* folders to drawable-* folders.

    Why? http://crbug.com/289843
    Copied from build/android/gyp/package_resources.py.
    """
    for entry in os.listdir(res_root):
        components = entry.split('-')
        if components[0] != 'drawable' or 'mdpi' not in components:
            continue
        src_dir = os.path.join(res_root, entry)
        if not os.path.isdir(src_dir):
            continue
        # Drop every 'mdpi' qualifier to get the destination folder name.
        kept = [part for part in components if part != 'mdpi']
        assert kept != components
        dst_dir = os.path.join(res_root, '-'.join(kept))
        if not os.path.isdir(dst_dir):
            os.makedirs(dst_dir)
        for name in os.listdir(src_dir):
            if not name.endswith('.png'):
                continue
            src_file = os.path.join(src_dir, name)
            dst_file = os.path.join(dst_dir, name)
            assert not os.path.lexists(dst_file)
            shutil.move(src_file, dst_file)
def ReplaceCrunchedImage(project_source, filename, filepath):
    """Replace crunched images with source images.
    """
    # NOTE: despite the name, 'filepath' is the *destination directory*
    # (e.g. .../res/drawable-hdpi), not a file path.
    search_dir = [
        'components/web_contents_delegate_android/android/java/res',
        'content/public/android/java/res',
        'ui/android/java/res'
    ]
    pathname = os.path.basename(filepath)
    #replace crunched 9-patch image resources.
    for search in search_dir:
        absdir = os.path.join(project_source, search)
        for dirname, _, files in os.walk(absdir):
            if filename in files:
                relativedir = os.path.basename(dirname)
                # Match the exact qualifier folder, or map plain 'drawable'
                # output back to the 'drawable-mdpi' sources.
                if (pathname == 'drawable' and relativedir == 'drawable-mdpi') or \
                    relativedir == pathname:
                    source_file = os.path.abspath(os.path.join(dirname, filename))
                    target_file = os.path.join(filepath, filename)
                    shutil.copyfile(source_file, target_file)
                    # First match wins; stop searching.
                    return
def CopyResources(project_source, out_dir, out_project_dir, shared):
    """Unpack the generated resource zips into res/ of the output project,
    de-duplicating file names and restoring uncrunched 9-patch images."""
    print 'Copying resources...'
    res_dir = os.path.join(out_project_dir, 'res')
    temp_dir = os.path.join(out_project_dir, 'temp')
    # Start from a clean slate.
    if os.path.exists(res_dir):
        shutil.rmtree(res_dir)
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir)
    # All resources should be in specific folders in res_directory.
    # Since there might be some resource files with same names from
    # different folders like ui_java, content_java and others,
    # it's necessary to rename some files to avoid overridding.
    res_to_copy = [
        # zip file list
        'content_java.zip',
        'content_strings_grd.zip',
        'ui_java.zip',
        'ui_strings_grd.zip',
        'web_contents_delegate_android_java.zip',
        'xwalk_core_internal_java.zip',
        'xwalk_core_java.zip',
        'xwalk_core_strings.zip',
        'xwalk_app_strings.zip'
    ]
    # Extract each zip into its own temp subfolder, then merge into res/,
    # prefixing duplicate names with the zip's base name.
    for res_zip in res_to_copy:
        zip_file = os.path.join(out_dir, 'res.java', res_zip)
        zip_name = os.path.splitext(res_zip)[0]
        if not os.path.isfile(zip_file):
            raise Exception('Resource zip not found: ' + zip_file)
        subdir = os.path.join(temp_dir, zip_name)
        if os.path.isdir(subdir):
            raise Exception('Resource zip name conflict: ' + zip_name)
        os.makedirs(subdir)
        with zipfile.ZipFile(zip_file) as z:
            z.extractall(path=subdir)
        CopyDirAndPrefixDuplicates(subdir, res_dir, zip_name,
                                   blacklist=['OWNERS'])
    MoveImagesToNonMdpiFolders(res_dir)
    if os.path.isdir(temp_dir):
        shutil.rmtree(temp_dir)
    #search 9-patch, then replace it with uncrunch image.
    for dirname, _, files in os.walk(res_dir):
        for filename in files:
            if filename.endswith('.9.png'):
                ReplaceCrunchedImage(project_source, filename, dirname)
def main(argv):
    """Drive the full library-project generation from command-line options."""
    option_parser = optparse.OptionParser()
    AddGeneratorOptions(option_parser)
    options, _ = option_parser.parse_args(argv)

    if not os.path.exists(options.source):
        print 'Source project does not exist, please provide correct directory.'
        sys.exit(1)
    out_dir = options.target

    # Derive the output project directory name from the shared / src-package
    # flag combination.
    if options.src_package:
        if options.shared :
            out_project_dir = os.path.join(out_dir, 'xwalk_shared_library_src')
        else :
            out_project_dir = os.path.join(out_dir, 'xwalk_core_library_src')
    else:
        if options.shared :
            out_project_dir = os.path.join(out_dir, 'xwalk_shared_library')
        else :
            out_project_dir = os.path.join(out_dir, 'xwalk_core_library')

    # Clean directory for project first.
    CleanLibraryProject(out_project_dir)

    if not os.path.exists(out_project_dir):
        os.mkdir(out_project_dir)

    # Copy Eclipse project files of library project.
    CopyProjectFiles(options.source, out_project_dir, options.shared)
    # Copy binaries and resources.
    CopyResources(options.source, out_dir, out_project_dir, options.shared)
    CopyBinaries(out_dir, out_project_dir, options.src_package, options.shared)
    # Copy JS API binding files.
    CopyJSBindingFiles(options.source, out_project_dir)
    # Remove unused files; the build mode is the out dir's leaf name.
    mode = os.path.basename(os.path.normpath(out_dir))
    RemoveUnusedFilesInReleaseMode(mode,
                                   os.path.join(out_project_dir, 'libs'))
    # Create empty src directory
    src_dir = os.path.join(out_project_dir, 'src')
    if not os.path.isdir(src_dir):
        os.mkdir(src_dir)
    readme = os.path.join(src_dir, 'README.md')
    # NOTE(review): the file handle is never closed explicitly; CPython closes
    # it when the refcount drops, but a context manager would be safer.
    open(readme, 'w').write(
        "# Source folder for xwalk library\n"
        "## Why it's empty\n"
        "xwalk library doesn't contain java sources.\n"
        "## Why put me here\n"
        "To make archives keep the folder, "
        "the src directory is needed to build an apk by ant.")
    print 'Your Android library project has been created at %s' % (
        out_project_dir)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
| bsd-3-clause |
toshywoshy/ansible | test/units/plugins/lookup/test_conjur_variable.py | 52 | 4748 | # -*- coding: utf-8 -*-
# (c) 2018, Jason Vanderhoof <jason.vanderhoof@cyberark.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from units.compat.mock import MagicMock
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import http_client
from ansible.plugins.lookup import conjur_variable
import tempfile
class TestLookupModule:
    """Unit tests for the conjur_variable lookup plugin helper functions."""

    def test_valid_netrc_file(self):
        # A netrc entry matching the Conjur URL yields the login/api_key pair.
        with tempfile.NamedTemporaryFile() as temp_netrc:
            temp_netrc.write(b"machine http://localhost/authn\n")
            temp_netrc.write(b" login admin\n")
            temp_netrc.write(b" password my-pass\n")
            temp_netrc.seek(0)

            results = conjur_variable._load_identity_from_file(temp_netrc.name, 'http://localhost')
            assert results['id'] == 'admin'
            assert results['api_key'] == 'my-pass'

    def test_netrc_without_host_file(self):
        # No netrc entry for the requested host must raise AnsibleError.
        with tempfile.NamedTemporaryFile() as temp_netrc:
            temp_netrc.write(b"machine http://localhost/authn\n")
            temp_netrc.write(b" login admin\n")
            temp_netrc.write(b" password my-pass\n")
            temp_netrc.seek(0)

            with pytest.raises(AnsibleError):
                conjur_variable._load_identity_from_file(temp_netrc.name, 'http://foo')

    def test_valid_configuration(self):
        # YAML config is parsed into a plain dict of settings.
        with tempfile.NamedTemporaryFile() as configuration_file:
            configuration_file.write(b"---\n")
            configuration_file.write(b"account: demo-policy\n")
            configuration_file.write(b"plugins: []\n")
            configuration_file.write(b"appliance_url: http://localhost:8080\n")
            configuration_file.seek(0)

            results = conjur_variable._load_conf_from_file(configuration_file.name)
            assert results['account'] == 'demo-policy'
            assert results['appliance_url'] == 'http://localhost:8080'

    def test_valid_token_retrieval(self, mocker):
        # open_url is patched; a 200 response body becomes the token.
        mock_response = MagicMock(spec_set=http_client.HTTPResponse)
        try:
            mock_response.getcode.return_value = 200
        except Exception:
            # HTTPResponse is a Python 3 only feature. This uses a generic mock for python 2.6
            mock_response = MagicMock()
            mock_response.getcode.return_value = 200
        mock_response.read.return_value = 'foo-bar-token'
        mocker.patch.object(conjur_variable, 'open_url', return_value=mock_response)

        response = conjur_variable._fetch_conjur_token('http://conjur', 'account', 'username', 'api_key')
        assert response == 'foo-bar-token'

    def test_valid_fetch_conjur_variable(self, mocker):
        mock_response = MagicMock(spec_set=http_client.HTTPResponse)
        try:
            mock_response.getcode.return_value = 200
        except Exception:
            # HTTPResponse is a Python 3 only feature. This uses a generic mock for python 2.6
            mock_response = MagicMock()
            mock_response.getcode.return_value = 200
        mock_response.read.return_value = 'foo-bar'
        mocker.patch.object(conjur_variable, 'open_url', return_value=mock_response)

        # NOTE(review): despite the test name this calls _fetch_conjur_token,
        # not _fetch_conjur_variable -- looks like a copy/paste slip; confirm
        # the intended target and its argument order before changing.
        response = conjur_variable._fetch_conjur_token('super-secret', 'token', 'http://conjur', 'account')
        assert response == 'foo-bar'

    def test_invalid_fetch_conjur_variable(self, mocker):
        # Each failure status (401/403/404) must surface as AnsibleError.
        for code in [401, 403, 404]:
            mock_response = MagicMock(spec_set=http_client.HTTPResponse)
            try:
                mock_response.getcode.return_value = code
            except Exception:
                # HTTPResponse is a Python 3 only feature. This uses a generic mock for python 2.6
                mock_response = MagicMock()
                mock_response.getcode.return_value = code
            mocker.patch.object(conjur_variable, 'open_url', return_value=mock_response)

            with pytest.raises(AnsibleError):
                # NOTE(review): same token-vs-variable naming mismatch as above.
                response = conjur_variable._fetch_conjur_token('super-secret', 'token', 'http://conjur', 'account')
| gpl-3.0 |
cchurch/ansible-modules-extras | cloud/amazon/s3_logging.py | 65 | 5644 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3_logging
short_description: Manage logging facility of an s3 bucket in AWS
description:
- Manage logging facility of an s3 bucket in AWS
version_added: "2.0"
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket."
required: true
state:
description:
- "Enable or disable logging."
required: false
default: present
choices: [ 'present', 'absent' ]
target_bucket:
description:
- "The bucket to log to. Required when state=present."
required: false
default: null
target_prefix:
description:
- "The prefix that should be prepended to the generated log files written to the target_bucket."
required: false
default: ""
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
s3_logging:
name: mywebsite.com
target_bucket: mylogs
target_prefix: logs/mywebsite.com
state: present
- name: Remove logging on an s3 bucket
s3_logging:
name: mywebsite.com
state: absent
'''
try:
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.exception import BotoServerError, S3CreateError, S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def compare_bucket_logging(bucket, target_bucket, target_prefix):
    """Return True when the bucket's current logging status already matches
    the desired target bucket and prefix, False otherwise."""
    status = bucket.get_logging_status()
    return status.target == target_bucket and status.prefix == target_prefix
def enable_bucket_logging(connection, module):
    """Enable server-access logging on the named bucket, granting the target
    bucket the required log-delivery permissions first.

    Exits the Ansible module via exit_json/fail_json; does not return.
    """
    bucket_name = module.params.get("name")
    target_bucket = module.params.get("target_bucket")
    target_prefix = module.params.get("target_prefix")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    try:
        # Only act when the current logging config differs (idempotence).
        if not compare_bucket_logging(bucket, target_bucket, target_prefix):
            # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
            try:
                target_bucket_obj = connection.get_bucket(target_bucket)
            except S3ResponseError as e:
                # A 301 redirect means the target bucket is in another region.
                if e.status == 301:
                    module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
                else:
                    module.fail_json(msg=e.message)
            target_bucket_obj.set_as_logging_target()

            bucket.enable_logging(target_bucket, target_prefix)
            changed = True

    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
def disable_bucket_logging(connection, module):
    """Disable server-access logging on the named bucket (idempotent).

    Exits the Ansible module via exit_json/fail_json; does not return.
    """
    bucket_name = module.params.get("name")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
        # Logging is "absent" when neither target nor prefix is set.
        if not compare_bucket_logging(bucket, None, None):
            bucket.disable_logging()
            changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
def main():
    """Module entry point: build the arg spec, connect to S3 and dispatch on
    the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name = dict(required=True),
            target_bucket = dict(required=False, default=None),
            target_prefix = dict(required=False, default=""),
            state = dict(required=False, default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    # NOTE(review): Python 2-only 'except ..., e' syntax; this file will not
    # parse on Python 3.
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
        module.fail_json(msg=str(e))

    state = module.params.get("state")

    if state == 'present':
        enable_bucket_logging(connection, module)
    elif state == 'absent':
        disable_bucket_logging(connection, module)
disable_bucket_logging(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
murfz/Sick-Beard | lib/requests/packages/chardet2/sbcharsetprober.py | 52 | 4582 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Prober that scores byte statistics against one single-byte charset's
    language model (character frequency order + bigram precedence matrix)."""

    def __init__(self, model, reversed=False, nameProber=None):
        # 'reversed' shadows the builtin; the name is kept for API
        # compatibility with existing callers.
        CharSetProber.__init__(self)
        self._mModel = model
        self._mReversed = reversed  # TRUE if we need to reverse every pair in the model lookup
        self._mNameProber = nameProber  # Optional auxiliary prober for name decision
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        self._mLastOrder = 255  # char order of last character
        # One counter per sequence-likelihood category (see NUMBER_OF_SEQ_CAT).
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        self._mFreqChar = 0  # characters that fall in our sampling range

    def get_charset_name(self):
        # Defer to the name prober (e.g. for Hebrew logical/visual) when set.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a chunk of bytes and update the sequence statistics."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            # Map the byte to its frequency order in this language model.
            order = self._mModel['charToOrderMap'][c]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only count bigrams where both characters are in-sample.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(self._mLastOrder * SAMPLE_SIZE) + order]] += 1
                    else:  # reverse the order of the letters in the lookup
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(order * SAMPLE_SIZE) + self._mLastOrder]] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                # Shortcut out early when confidence is decisive either way.
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a winner\n' % (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative shortcut threshhold %s\n' % (self._mModel['charsetName'], cf, NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Return a confidence in (0, 0.99] based on positive-bigram ratio."""
        r = 0.01
        if self._mTotalSeqs > 0:
            # print self._mSeqCounters[POSITIVE_CAT], self._mTotalSeqs, self._mModel['mTypicalPositiveRatio']
            r = (1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs / self._mModel['mTypicalPositiveRatio']
            # print r, self._mFreqChar, self._mTotalChar
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            # Never report full certainty.
            r = 0.99
        return r
| gpl-3.0 |
Vixionar/django | tests/annotations/models.py | 238 | 2901 | # coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    """Test fixture: author with a self-referential friends M2M."""
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField('self', blank=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Publisher(models.Model):
    """Test fixture: publisher with an aggregatable award count."""
    name = models.CharField(max_length=255)
    num_awards = models.IntegerField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Book(models.Model):
    """Test fixture: book with M2M authors plus a distinct FK contact."""
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    # Separate FK to Author so annotations can traverse both relation kinds.
    contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set')
    publisher = models.ForeignKey(Publisher, models.CASCADE)
    pubdate = models.DateField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Store(models.Model):
    """Test fixture: store with datetime and time fields for annotations."""
    name = models.CharField(max_length=255)
    books = models.ManyToManyField(Book)
    original_opening = models.DateTimeField()
    friday_night_closing = models.TimeField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class DepartmentStore(Store):
    """Multi-table-inheritance child of Store, adding a chain name."""
    chain = models.CharField(max_length=255)
    def __str__(self):
        # NOTE(review): trailing space preserved — tests may compare the
        # exact rendered string.
        return '%s - %s ' % (self.chain, self.name)
@python_2_unicode_compatible
class Employee(models.Model):
    """Test fixture exercising backend field-order-sensitive conversions."""
    # The order of these fields matter, do not change. Certain backends
    # rely on field ordering to perform database conversions, and this
    # model helps to test that.
    first_name = models.CharField(max_length=20)
    manager = models.BooleanField(default=False)
    last_name = models.CharField(max_length=20)
    store = models.ForeignKey(Store, models.CASCADE)
    age = models.IntegerField()
    salary = models.DecimalField(max_digits=8, decimal_places=2)
    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Company(models.Model):
    """Test fixture with several nullable text fields."""
    name = models.CharField(max_length=200)
    motto = models.CharField(max_length=200, null=True, blank=True)
    ticker_name = models.CharField(max_length=10, null=True, blank=True)
    description = models.CharField(max_length=200, null=True, blank=True)
    def __str__(self):
        return ('Company(name=%s, motto=%s, ticker_name=%s, description=%s)'
            % (self.name, self.motto, self.ticker_name, self.description)
        )
@python_2_unicode_compatible
class Ticket(models.Model):
    """Test fixture pairing a DateTimeField with a DurationField."""
    active_at = models.DateTimeField()
    duration = models.DurationField()
    def __str__(self):
        return '{} - {}'.format(self.active_at, self.duration)
| bsd-3-clause |
thanhacun/odoo | addons/hr_holidays/report/available_holidays.py | 892 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
suiyuan2009/tensorflow | tensorflow/python/layers/base_test.py | 10 | 14518 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
def testLayerProperties(self):
layer = base_layers.Layer(name='my_layer')
self.assertListEqual(layer.variables, [])
self.assertListEqual(layer.trainable_variables, [])
self.assertListEqual(layer.non_trainable_variables, [])
self.assertListEqual(layer.updates, [])
self.assertListEqual(layer.losses, [])
self.assertEqual(layer.built, False)
layer = base_layers.Layer(name='my_layer', trainable=False)
self.assertEqual(layer.trainable, False)
def testAddWeight(self):
with self.test_session():
layer = base_layers.Layer(name='my_layer')
# Test basic variable creation.
variable = layer.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
self.assertEqual(variable.name, 'my_layer/my_var:0')
self.assertListEqual(layer.variables, [variable])
self.assertListEqual(layer.trainable_variables, [variable])
self.assertListEqual(layer.non_trainable_variables, [])
self.assertListEqual(
layer.variables,
ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
# Test non-trainable variable creation.
# layer.add_variable should work even outside `build` and `call`.
variable_2 = layer.add_variable(
'non_trainable_var', [2, 2],
initializer=init_ops.zeros_initializer(),
trainable=False)
self.assertListEqual(layer.variables, [variable, variable_2])
self.assertListEqual(layer.trainable_variables, [variable])
self.assertListEqual(layer.non_trainable_variables, [variable_2])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
# Test with regularizer.
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
variable = layer.add_variable(
'reg_var', [2, 2],
initializer=init_ops.zeros_initializer(),
regularizer=regularizer)
self.assertEqual(len(layer.losses), 1)
def testGetVariable(self):
with self.test_session():
class MyLayer(base_layers.Layer):
def build(self, input_shape):
self.my_var = self.add_variable(
'my_var', [2, 2], initializer=init_ops.zeros_initializer())
def call(self, inputs):
return inputs * 2
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
layer.apply(inputs)
layer.apply(inputs)
self.assertListEqual([v.name for v in layer.variables],
['my_layer/my_var:0'])
# Creating a layer with no scope leads to lazy construction of
# the scope at apply() time. It uses scope "<current scope>/base_name"
lazy_layer = MyLayer(_reuse=True)
with variable_scope.variable_scope('new_scope'):
# This should attempt to reuse 'my_var' in 'new_scope'
with self.assertRaisesRegexp(
ValueError, r'new_scope/my_layer/my_var does not exist'):
lazy_layer.apply(inputs)
with variable_scope.variable_scope('my_layer'):
variable_scope.get_variable('my_var', [2, 2])
# Smoke test: it runs.
lazy_layer.apply(inputs)
# The variables were created outside of the Layer, and
# reuse=True, so the Layer does not own them and they are not
# stored in its collection.
self.assertListEqual(lazy_layer.variables, [])
self.assertEqual(lazy_layer._scope.name, 'new_scope/my_layer')
# Creating a layer with no scope leads to lazy construction of
# the scope at apply() time. If 'scope' argument is passed to
# apply(), it uses that scope when accessing variables.
lazy_layer = MyLayer(_reuse=True)
with variable_scope.variable_scope('new_scope') as new_scope:
# This should attempt to reuse 'my_var' in 'new_scope'
with self.assertRaisesRegexp(
ValueError, r'new_scope/my_var does not exist'):
lazy_layer.apply(inputs, scope=new_scope)
variable_scope.get_variable('my_var', [2, 2])
# Smoke test: it runs.
lazy_layer.apply(inputs, scope=new_scope)
# The variables were created outside of the Layer, and
# reuse=True, so the Layer does not own them and they are not
# stored in its collection.
self.assertListEqual(lazy_layer.variables, [])
self.assertEqual(lazy_layer._scope.name, 'new_scope')
with ops.Graph().as_default():
inputs_ng = random_ops.random_uniform((5,), seed=1)
with self.assertRaisesRegexp(ValueError,
r'graph are not the same'):
layer.apply(inputs_ng)
def testCall(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
self.assertEqual(outputs.op.name, 'my_layer/Square')
def testFirstCallCanCreateVariablesButSecondCanNotWhenBuildEmpty(self):
class MyLayer(base_layers.Layer):
def build(self, _):
# Do not mark the layer as built.
pass
def call(self, inputs):
self.my_var = self.add_variable('my_var', [2, 2])
if self.built:
# Skip creating on the first call; try to create after it's
# built. This is expected to fail.
self.add_variable('this_will_break_on_second_call', [2, 2])
return inputs + math_ops.square(self.my_var)
layer = MyLayer(name='my_layer')
inputs = random_ops.random_uniform((2,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
self.assertEqual(outputs.op.name, 'my_layer/add')
self.assertListEqual(
[v.name for v in layer.variables], ['my_layer/my_var:0'])
with self.assertRaisesRegexp(ValueError,
'my_layer/this_will_break_on_second_call'):
layer.apply(inputs)
# The list of variables hasn't changed.
self.assertListEqual(
[v.name for v in layer.variables], ['my_layer/my_var:0'])
def testDeepCopy(self):
class MyLayer(base_layers.Layer):
def call(self, inputs):
return math_ops.square(inputs)
layer = MyLayer(name='my_layer')
layer._private_tensor = random_ops.random_uniform(())
inputs = random_ops.random_uniform((5,), seed=1)
outputs = layer.apply(inputs)
self.assertEqual(layer.built, True)
self.assertEqual(outputs.op.name, 'my_layer/Square')
layer_copy = copy.deepcopy(layer)
self.assertEqual(layer_copy.name, layer.name)
self.assertEqual(layer_copy._scope.name, layer._scope.name)
self.assertEqual(layer_copy._graph, layer._graph)
self.assertEqual(layer_copy._private_tensor, layer._private_tensor)
def testScopeNaming(self):
class PrivateLayer(base_layers.Layer):
def call(self, inputs):
return None
inputs = random_ops.random_uniform((5,))
default_layer = PrivateLayer()
_ = default_layer.apply(inputs)
self.assertEqual(default_layer._scope.name, 'private_layer')
default_layer1 = PrivateLayer()
default_layer1.apply(inputs)
self.assertEqual(default_layer1._scope.name, 'private_layer_1')
my_layer = PrivateLayer(name='my_layer')
my_layer.apply(inputs)
self.assertEqual(my_layer._scope.name, 'my_layer')
my_layer1 = PrivateLayer(name='my_layer')
my_layer1.apply(inputs)
self.assertEqual(my_layer1._scope.name, 'my_layer_1')
my_layer2 = PrivateLayer(name='my_layer')
my_layer2.apply(inputs)
self.assertEqual(my_layer2._scope.name, 'my_layer_2')
# Name scope shouldn't affect names.
with ops.name_scope('some_name_scope'):
default_layer2 = PrivateLayer()
default_layer2.apply(inputs)
self.assertEqual(default_layer2._scope.name, 'private_layer_2')
my_layer3 = PrivateLayer(name='my_layer')
my_layer3.apply(inputs)
self.assertEqual(my_layer3._scope.name, 'my_layer_3')
other_layer = PrivateLayer(name='other_layer')
other_layer.apply(inputs)
self.assertEqual(other_layer._scope.name, 'other_layer')
# Variable scope gets added to scope names.
with variable_scope.variable_scope('var_scope'):
default_layer_scoped = PrivateLayer()
default_layer_scoped.apply(inputs)
self.assertEqual(default_layer_scoped._scope.name,
'var_scope/private_layer')
my_layer_scoped = PrivateLayer(name='my_layer')
my_layer_scoped.apply(inputs)
self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
my_layer_scoped1 = PrivateLayer(name='my_layer')
my_layer_scoped1.apply(inputs)
self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')
def testInputSpecNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(ndim=2)
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
with self.assertRaisesRegexp(ValueError,
r'expected ndim=2'):
layer.apply(array_ops.placeholder('int32', shape=(None,)))
# Works
layer.apply(array_ops.placeholder('int32', shape=(None, None)))
def testInputSpecMinNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(min_ndim=2)
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
with self.assertRaisesRegexp(ValueError,
r'expected min_ndim=2'):
layer.apply(array_ops.placeholder('int32', shape=(None,)))
# Works
layer.apply(array_ops.placeholder('int32', shape=(None, None)))
layer.apply(array_ops.placeholder('int32', shape=(None, None, None)))
def testInputSpecMaxNdimCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(max_ndim=2)
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'requires a defined rank'):
layer.apply(array_ops.placeholder('int32'))
with self.assertRaisesRegexp(ValueError,
r'expected max_ndim=2'):
layer.apply(array_ops.placeholder('int32', shape=(None, None, None)))
# Works
layer.apply(array_ops.placeholder('int32', shape=(None, None)))
layer.apply(array_ops.placeholder('int32', shape=(None,)))
def testInputSpecDtypeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(dtype='float32')
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'expected dtype=float32'):
layer.apply(array_ops.placeholder('int32'))
# Works
layer.apply(array_ops.placeholder('float32', shape=(None, None)))
def testInputSpecAxesCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(axes={-1: 2})
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'expected axis'):
layer.apply(array_ops.placeholder('int32', shape=(None, 3)))
# Works
layer.apply(array_ops.placeholder('int32', shape=(None, None, 2)))
layer.apply(array_ops.placeholder('int32', shape=(None, 2)))
def testInputSpecShapeCheck(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = base_layers.InputSpec(shape=(None, 3))
def call(self, inputs):
return inputs
layer = CustomerLayer()
with self.assertRaisesRegexp(ValueError,
r'expected shape'):
layer.apply(array_ops.placeholder('int32', shape=(None, 2)))
# Works
layer.apply(array_ops.placeholder('int32', shape=(None, 3)))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
def testNoInputSpec(self):
class CustomerLayer(base_layers.Layer):
def __init__(self):
super(CustomerLayer, self).__init__()
self.input_spec = None
def call(self, inputs):
return inputs
layer = CustomerLayer()
# Works
layer.apply(array_ops.placeholder('int32'))
layer.apply(array_ops.placeholder('int32', shape=(2, 3)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
synctree/synctree-awsebcli | ebcli/objects/tier.py | 5 | 2564 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..objects.exceptions import NotFoundError
import re
class Tier():
    """An Elastic Beanstalk environment tier (WebServer or Worker).

    ``string`` holds the canonical "Name-Type[-Version]" form; equality
    is a case-insensitive comparison of that canonical string.
    """
    def __init__(self, name, typ, version):
        self.name = name
        self.type = typ
        self.version = version.strip()
        self.string = self.__str__()

    def to_struct(self):
        """Return the tier as a dict in the shape the EB API expects.

        'Version' is omitted when the tier has no explicit version.
        """
        strct = {
            'Name': self.name,
            'Type': self.type,
        }
        if self.version:
            strct['Version'] = self.version
        return strct

    def __str__(self):
        s = self.name + '-' + self.type
        if self.version:
            s += '-' + self.version
        return s

    def __eq__(self, other):
        if not isinstance(other, Tier):
            return False
        return self.string.lower() == other.string.lower()

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; keep them consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ alone would make Tier unhashable on Python 3;
        # hash the same canonical key that __eq__ compares.
        return hash(self.string.lower())

    @staticmethod
    def get_all_tiers():
        """Return every tier/version combination the CLI knows about."""
        lst = [
            Tier('WebServer', 'Standard', '1.0'),
            Tier('Worker', 'SQS/HTTP', '1.0'),
            Tier('Worker', 'SQS/HTTP', '1.1'),
            Tier('Worker', 'SQS/HTTP', ''),
        ]
        return lst

    @staticmethod
    def parse_tier(string):
        """Parse user input such as 'web', 'worker' or 'Worker-SQS/HTTP-1.1'.

        Raises:
            NotFoundError: when the input does not name a known tier.
        """
        if string.lower() in ('web', 'webserver'):
            return Tier('WebServer', 'Standard', '1.0')
        if string.lower() == 'worker':
            return Tier('Worker', 'SQS/HTTP', '')
        params = string.split('-')
        if len(params) == 3:
            name, typ, version = params
        elif len(params) == 2:
            name, typ = params
            # A lone trailing component like "1.0" is a version, not a type.
            # (Raw string: '\d' in a plain literal is an invalid escape on
            # modern Python.)
            if re.match(r'\d+[.]\d+', typ):
                version = typ
            else:
                version = ''
        else:
            raise NotFoundError('Tier Not found')
        # we want to return the Proper, uppercase version
        if name.lower() in ('webserver', 'web'):
            return Tier('WebServer', 'Standard', version)
        elif name.lower() == 'worker':
            return Tier('Worker', 'SQS/HTTP', version)
        # tier not found
        raise NotFoundError('Tier Not found')
Gitlab11/odoo | addons/product_extended/__init__.py | 374 | 1068 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_extended
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
myerpengine/odoo | addons/decimal_precision/tests/test_qweb_float.py | 103 | 2000 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestFloatExport(common.TransactionCase):
    """Checks ir.qweb float-to-html rendering against decimal precisions."""
    def setUp(self):
        super(TestFloatExport, self).setUp()
        # Test model exposing float fields with different digit settings.
        self.Model = self.registry('decimal.precision.test')
    def get_converter(self, name):
        # Build a closure that renders values of field `name` through the
        # qweb float converter, optionally with rendering options.
        converter = self.registry('ir.qweb.field.float')
        column = self.Model._all_columns[name].column
        return lambda value, options=None: converter.value_to_html(
            self.cr, self.uid, value, column, options=options, context=None)
    def test_basic_float(self):
        # Field-level digits drive the rendered precision: 'float' has no
        # fixed digits, 'float_2'/'float_4' are fixed-precision.
        converter = self.get_converter('float')
        self.assertEqual(
            converter(42.0),
            "42.0")
        self.assertEqual(
            converter(42.12345),
            "42.12345")
        converter = self.get_converter('float_2')
        self.assertEqual(
            converter(42.0),
            "42.00")
        self.assertEqual(
            converter(42.12345),
            "42.12")
        converter = self.get_converter('float_4')
        self.assertEqual(
            converter(42.0),
            '42.0000')
        self.assertEqual(
            converter(42.12345),
            '42.1234')
    def test_precision_domain(self):
        # A decimal.precision record referenced via options overrides the
        # field's own digits.
        DP = self.registry('decimal.precision')
        DP.create(self.cr, self.uid, {
            'name': 'A',
            'digits': 2,
        })
        DP.create(self.cr, self.uid, {
            'name': 'B',
            'digits': 6,
        })
        converter = self.get_converter('float')
        self.assertEqual(
            converter(42.0, {'decimal_precision': 'A'}),
            '42.00')
        self.assertEqual(
            converter(42.0, {'decimal_precision': 'B'}),
            '42.000000')
        converter = self.get_converter('float_4')
        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'A'}),
            '42.12')
        self.assertEqual(
            converter(42.12345, {'decimal_precision': 'B'}),
            '42.123450')
| agpl-3.0 |
Comcast/neutron | neutron/plugins/linuxbridge/common/constants.py | 9 | 1525 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
from neutron.plugins.common import constants as p_const
FLAT_VLAN_ID = -1
LOCAL_VLAN_ID = -2
# Supported VXLAN features
VXLAN_NONE = 'not_supported'
VXLAN_MCAST = 'multicast_flooding'
VXLAN_UCAST = 'unicast_flooding'
# Corresponding minimal kernel versions requirements
MIN_VXLAN_KVER = {VXLAN_MCAST: '3.8', VXLAN_UCAST: '3.11'}
# TODO(rkukura): Eventually remove this function, which provides
# temporary backward compatibility with pre-Havana RPC and DB vlan_id
# encoding.
def interpret_vlan_id(vlan_id):
    """Return (network_type, segmentation_id) tuple for encoded vlan_id."""
    # The two negative sentinel values encode typed networks that carry
    # no real segmentation id; anything else is a plain VLAN id.
    sentinel_types = {
        LOCAL_VLAN_ID: p_const.TYPE_LOCAL,
        FLAT_VLAN_ID: p_const.TYPE_FLAT,
    }
    if vlan_id in sentinel_types:
        return (sentinel_types[vlan_id], None)
    return (p_const.TYPE_VLAN, vlan_id)
| apache-2.0 |
whiskey/Machine-Learning | de/staticline/classification/dummys.py | 1 | 1718 | '''
Created on Apr 29, 2011
@author: Carsten Witzke
'''
from de.staticline.tools.libsvmtools import LibsvmFileImporter
import math
class Always1Predictor(object):
    '''
    A dummy predictor assuming always "1" for each instance.
    '''
    def __init__(self):
        pass
    def buildClassifier(self, trainFile):
        '''"builds" a classification model returning always 1 for each instance'''
        # Only the instance count is recorded; the "model" needs no fitting.
        train = LibsvmFileImporter(trainFile).get_dataSet()
        self.__inst_train = train.get_numInstances()
        # no training needed
    def validateModel(self, testFile):
        '''Score the constant-1 prediction against a libsvm test file.

        Computes the percentage of targets equal to 1 and the RMSE of
        predicting 1 for every instance.
        '''
        testdata = LibsvmFileImporter(testFile).get_dataSet()
        self.__inst_test = testdata.get_numInstances()
        ## --- statistics
        correct = 0.
        sum_error = 0
        for i in testdata.get_targets():
            if i == 1: #correct
                correct += 1.
            else:
                # squared error of predicting 1 against target i
                sum_error += math.pow(1 - i, 2)
        # percent correct
        self.__pct_correct = 100 * (correct/self.__inst_test)
        # root mean squared error
        self.__rmse = math.sqrt(sum_error / self.__inst_test)
    def get_pctCorrect(self):
        return self.__pct_correct
    def get_rmse(self):
        return self.__rmse
    def get_inst_train(self):
        return self.__inst_train
    def get_inst_test(self):
        return self.__inst_test
    #properties
    inst_train = property(get_inst_train, doc='number of training instances')
    inst_test = property(get_inst_test, doc='number of test instances')
    pct_correct = property(get_pctCorrect, doc='the percentage of correct instances')
    rmse = property(get_rmse, doc='the root mean squared error')
| mit |
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/widgets/syncmlwarning.py | 1 | 2136 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib.i18n import _
class SyncMLWarningDialog(wx.Dialog):
    """Modal dialog warning that the SyncML module could not be loaded.

    Ends the modal loop with wx.ID_OK when the user ticked "never show
    again", wx.ID_CANCEL otherwise, so the caller can persist the choice.
    """
    def __init__(self, parent):
        super(SyncMLWarningDialog, self).__init__(parent, wx.ID_ANY, _('Compatibility warning'))
        textWidget = wx.StaticText(self, wx.ID_ANY,
            _('The SyncML feature is disabled, because the module\n'
              'could not be loaded. This may be because your platform\n'
              'is not supported, or under Windows, you may be missing\n'
              'some mandatory DLLs. Please see the SyncML section of\n'
              'the online help for details (under "Troubleshooting").'))
        # Checked by default: most users will not want to see this again.
        self.checkbox = wx.CheckBox(self, wx.ID_ANY, _('Never show this dialog again'))
        self.checkbox.SetValue(True)
        button = wx.Button(self, wx.ID_ANY, _('OK'))
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(textWidget, 0, wx.ALL, 10)
        sizer.Add(self.checkbox, 0, wx.ALL, 3)
        sizer.Add(button, 0, wx.ALL|wx.ALIGN_CENTRE, 3)
        self.SetSizer(sizer)
        # Both the OK button and the window close box go through OnOK so
        # the checkbox state is always reported.
        wx.EVT_BUTTON(button, wx.ID_ANY, self.OnOK)
        wx.EVT_CLOSE(self, self.OnOK)
        self.Fit()
    def OnOK(self, event):
        # ID_OK == "suppress future warnings"; ID_CANCEL == "show again".
        if self.checkbox.IsChecked():
            self.EndModal(wx.ID_OK)
        else:
            self.EndModal(wx.ID_CANCEL)
| gpl-3.0 |
mattstock/binutils-bexkat1 | gdb/python/lib/gdb/prompt.py | 5 | 4209 | # Extended prompt utilities.
# Copyright (C) 2011-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwd()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
# Maps escape letter -> handler.  Each handler receives the optional
# "{...}" argument (or None) and returns the replacement text; the
# handler docstrings feed prompt_help().
prompt_substitutions = {
    'e': _prompt_esc,
    '\\': _prompt_bs,
    'n': _prompt_n,
    'r': _prompt_r,
    'v': _prompt_version,
    'w': _prompt_pwd,
    'f': _prompt_frame,
    't': _prompt_thread,
    'p': _prompt_param,
    '[': _prompt_noprint_begin,
    ']': _prompt_noprint_end
}
def prompt_help():
    """Generate help dynamically from the __doc__ strings of attribute
    functions."""
    result = ''
    # One "\x<TAB>description" line per substitution, in sorted order.
    keys = sorted (prompt_substitutions.keys())
    for key in keys:
        result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
    result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
    return result
def substitute_prompt(prompt):
    "Perform substitutions on PROMPT."
    result = ''
    plen = len(prompt)
    i = 0
    while i < plen:
        if prompt[i] == '\\':
            # Escape sequence: look at the character after the backslash.
            i = i + 1
            if i >= plen:
                # Trailing lone backslash: silently dropped.
                break
            cmdch = prompt[i]
            if cmdch in prompt_substitutions:
                cmd = prompt_substitutions[cmdch]
                if i + 1 < plen and prompt[i + 1] == '{':
                    # Scan forward for the matching '}' to get the argument.
                    j = i + 1
                    while j < plen and prompt[j] != '}':
                        j = j + 1
                    # Just ignore formatting errors.
                    if j >= plen or prompt[j] != '}':
                        arg = None
                    else:
                        arg = prompt[i + 2 : j]
                    # Resume scanning after the argument (or at end).
                    i = j
                else:
                    arg = None
                result += str(cmd(arg))
            else:
                # Unrecognized escapes are turned into the escaped
                # character itself.
                result += prompt[i]
        else:
            result += prompt[i]
        i = i + 1
    return result
| gpl-2.0 |
computersalat/ansible | lib/ansible/module_utils/facts/packages.py | 51 | 2601 | # (c) 2018, Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from ansible.module_utils.six import with_metaclass
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common._utils import get_all_subclasses
def get_all_pkg_managers():
    """Map lowercased class name -> concrete PkgMgr subclass.

    The abstract intermediates CLIMgr and LibMgr are excluded; only real
    package-manager implementations are returned.
    """
    # Dict comprehension instead of dict([...]) — same result, idiomatic
    # and avoids building an intermediate list.
    return {obj.__name__.lower(): obj
            for obj in get_all_subclasses(PkgMgr)
            if obj not in (CLIMgr, LibMgr)}
class PkgMgr(with_metaclass(ABCMeta, object)):
    """Abstract interface every package-manager fact collector implements."""
    @abstractmethod
    def is_available(self):
        # This method is supposed to return True/False if the package manager is currently installed/usable
        # It can also 'prep' the required systems in the process of detecting availability
        pass
    @abstractmethod
    def list_installed(self):
        # This method should return a list of installed packages, each list item will be passed to get_package_details
        pass
    @abstractmethod
    def get_package_details(self, package):
        # This takes a 'package' item and returns a dictionary with the package information, name and version are minimal requirements
        pass
    def get_packages(self):
        """Return {package_name: [details_dict, ...]} for every installed package."""
        # Take all of the above and return a dictionary of lists of dictionaries (package = list of installed versions)
        installed_packages = {}
        for package in self.list_installed():
            package_details = self.get_package_details(package)
            if 'source' not in package_details:
                # Default the source to the collector's own name (e.g. 'rpm').
                package_details['source'] = self.__class__.__name__.lower()
            name = package_details['name']
            if name not in installed_packages:
                installed_packages[name] = [package_details]
            else:
                installed_packages[name].append(package_details)
        return installed_packages
class LibMgr(PkgMgr):
    """Base for managers queried through an importable Python library."""
    LIB = None  # name of the python module to import (set by subclasses)
    def __init__(self):
        self._lib = None  # the imported module, populated by is_available()
        super(LibMgr, self).__init__()
    def is_available(self):
        # Availability == the backing library can be imported; keep the
        # module object for later queries.
        found = False
        try:
            self._lib = __import__(self.LIB)
            found = True
        except ImportError:
            pass
        return found
class CLIMgr(PkgMgr):
    """Base class for package managers driven through a command-line tool."""

    CLI = None  # name of the executable to locate on the target host

    def __init__(self):
        self._cli = None
        super(CLIMgr, self).__init__()

    def is_available(self):
        """Return True when the CLI binary is found on the PATH."""
        try:
            self._cli = get_bin_path(self.CLI)
            return True
        except ValueError:
            # get_bin_path raises ValueError when the binary is missing.
            return False
| gpl-3.0 |
Pennebaker/wagtail | wagtail/contrib/wagtailroutablepage/tests.py | 9 | 8024 | import unittest
import warnings
import django
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import NoReverseMatch
from wagtail.wagtailcore.models import Page, Site
from wagtail.tests.routablepage.models import OldStyleRoutablePageTest, NewStyleRoutablePageTest, routable_page_external_view
from wagtail.tests.utils import WagtailTestUtils
from wagtail.contrib.wagtailroutablepage.templatetags.wagtailroutablepage_tags import routablepageurl
from wagtail.contrib.wagtailroutablepage.models import RoutablePageMixin
from wagtail.utils.deprecation import RemovedInWagtail12Warning
class TestNewStyleRoutablePage(TestCase):
    """Tests RoutablePageMixin pages whose routes are declared with the
    @route decorator ("new style").

    Covers resolve_subpage (URL path -> view/args/kwargs), reverse_subpage
    (route name -> URL fragment) and full HTTP round-trips via the client.
    """
    model = NewStyleRoutablePageTest
    def setUp(self):
        # Install a live routable page beneath the fixture homepage (pk=2).
        self.home_page = Page.objects.get(id=2)
        self.routable_page = self.home_page.add_child(instance=self.model(
            title="Routable Page",
            slug='routable-page',
            live=True,
        ))
    # --- resolve_subpage: URL path -> (view, args, kwargs) ---
    def test_resolve_main_view(self):
        view, args, kwargs = self.routable_page.resolve_subpage('/')
        self.assertEqual(view, self.routable_page.main)
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {})
    def test_resolve_archive_by_year_view(self):
        view, args, kwargs = self.routable_page.resolve_subpage('/archive/year/2014/')
        self.assertEqual(view, self.routable_page.archive_by_year)
        self.assertEqual(args, ('2014', ))
        self.assertEqual(kwargs, {})
    def test_resolve_archive_by_author_view(self):
        view, args, kwargs = self.routable_page.resolve_subpage('/archive/author/joe-bloggs/')
        self.assertEqual(view, self.routable_page.archive_by_author)
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'author_slug': 'joe-bloggs'})
    def test_resolve_external_view(self):
        view, args, kwargs = self.routable_page.resolve_subpage('/external/joe-bloggs/')
        self.assertEqual(view, self.routable_page.external_view)
        self.assertEqual(args, ('joe-bloggs', ))
        self.assertEqual(kwargs, {})
    def test_resolve_external_view_other_route(self):
        # The same view can be reachable through more than one route.
        view, args, kwargs = self.routable_page.resolve_subpage('/external-no-arg/')
        self.assertEqual(view, self.routable_page.external_view)
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {})
    # --- reverse_subpage: route name -> URL fragment (relative to the page) ---
    def test_reverse_main_view(self):
        url = self.routable_page.reverse_subpage('main')
        self.assertEqual(url, '')
    def test_reverse_archive_by_year_view(self):
        url = self.routable_page.reverse_subpage('archive_by_year', args=('2014', ))
        self.assertEqual(url, 'archive/year/2014/')
    def test_reverse_archive_by_author_view(self):
        url = self.routable_page.reverse_subpage('archive_by_author', kwargs={'author_slug': 'joe-bloggs'})
        self.assertEqual(url, 'archive/author/joe-bloggs/')
    def test_reverse_overridden_name(self):
        # @route(..., name=...) replaces the default (method) name entirely.
        url = self.routable_page.reverse_subpage('name_overridden')
        self.assertEqual(url, 'override-name-test/')
    def test_reverse_overridden_name_default_doesnt_work(self):
        with self.assertRaises(NoReverseMatch):
            self.routable_page.reverse_subpage('override_name_test')
    def test_reverse_external_view(self):
        url = self.routable_page.reverse_subpage('external_view', args=('joe-bloggs', ))
        self.assertEqual(url, 'external/joe-bloggs/')
    def test_reverse_external_view_other_route(self):
        url = self.routable_page.reverse_subpage('external_view')
        self.assertEqual(url, 'external-no-arg/')
    # --- full HTTP round-trips through the Django test client ---
    def test_get_main_view(self):
        response = self.client.get(self.routable_page.url)
        self.assertContains(response, "MAIN VIEW")
    def test_get_archive_by_year_view(self):
        response = self.client.get(self.routable_page.url + 'archive/year/2014/')
        self.assertContains(response, "ARCHIVE BY YEAR: 2014")
    def test_get_archive_by_author_view(self):
        response = self.client.get(self.routable_page.url + 'archive/author/joe-bloggs/')
        self.assertContains(response, "ARCHIVE BY AUTHOR: joe-bloggs")
    def test_get_external_view(self):
        response = self.client.get(self.routable_page.url + 'external/joe-bloggs/')
        self.assertContains(response, "EXTERNAL VIEW: joe-bloggs")
    def test_get_external_view_other_route(self):
        response = self.client.get(self.routable_page.url + 'external-no-arg/')
        self.assertContains(response, "EXTERNAL VIEW: ARG NOT SET")
@unittest.skipIf(django.VERSION >= (1, 8), "Old style routable pages don't work on Django 1.8")
class TestOldStyleRoutablePage(TestNewStyleRoutablePage, WagtailTestUtils):
    """Re-runs the routing tests against a page declaring routes via the
    deprecated ``subpage_urls`` attribute ("old style")."""
    model = OldStyleRoutablePageTest
    def test_resolve_external_view(self):
        # Old-style pages resolve to the bare module-level function rather
        # than a bound page method, so this override changes the expectation.
        view, args, kwargs = self.routable_page.resolve_subpage('/external/joe-bloggs/')
        self.assertEqual(view, routable_page_external_view)
        self.assertEqual(args, ('joe-bloggs', ))
        self.assertEqual(kwargs, {})
    # These features only exist for new-style pages; setting the inherited
    # test methods to None masks them from the test runner.
    test_resolve_external_view_other_route = None
    test_reverse_external_view_other_route = None
    test_get_external_view_other_route = None
    test_reverse_overridden_name = None
    test_reverse_overridden_name_default_doesnt_work = None
    def test_deprecation_warning(self):
        from django.conf.urls import url
        class TestPageModel(RoutablePageMixin, Page):
            subpage_urls = (
                url('r^$', 'main'),
            )
            def main(self, request):
                pass
            # prevent this class appearing in the global PAGE_MODEL_CLASSES list, as
            # its non-standard location causes failures when translating from content types
            # back to models
            class Meta:
                abstract = True
        # Calling check() should raise a deprecation warning
        # This would usually be called by Django when it loads
        self.reset_warning_registry()
        with warnings.catch_warnings(record=True) as w:
            TestPageModel.check()
            # Check that a RemovedInWagtail12Warning has been triggered
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, RemovedInWagtail12Warning))
            self.assertEqual("wagtailroutablepage.TestPageModel: subpage_urls "
                             "is deprecated. Use the @route decorator to "
                             "define page routes instead.", str(w[-1].message))
class TestRoutablePageTemplateTag(TestCase):
    """Tests the {% routablepageurl %} template tag, which combines a page's
    own URL with reverse_subpage output."""
    def setUp(self):
        self.home_page = Page.objects.get(id=2)
        self.routable_page = self.home_page.add_child(instance=NewStyleRoutablePageTest(
            title="Routable Page",
            slug='routable-page',
            live=True,
        ))
        # The tag needs a request (with a site attached) in its context to
        # build page URLs.
        self.rf = RequestFactory()
        self.request = self.rf.get(self.routable_page.url)
        self.request.site = Site.find_for_request(self.request)
        self.context = {'request': self.request}
    def test_templatetag_reverse_main_view(self):
        url = routablepageurl(self.context, self.routable_page,
                              'main')
        self.assertEqual(url, self.routable_page.url)
    def test_templatetag_reverse_archive_by_year_view(self):
        url = routablepageurl(self.context, self.routable_page,
                              'archive_by_year', '2014')
        self.assertEqual(url, self.routable_page.url + 'archive/year/2014/')
    def test_templatetag_reverse_archive_by_author_view(self):
        url = routablepageurl(self.context, self.routable_page,
                              'archive_by_author', author_slug='joe-bloggs')
        self.assertEqual(url, self.routable_page.url + 'archive/author/joe-bloggs/')
    def test_templatetag_reverse_external_view(self):
        url = routablepageurl(self.context, self.routable_page,
                              'external_view', 'joe-bloggs')
        self.assertEqual(url, self.routable_page.url + 'external/joe-bloggs/')
| bsd-3-clause |
SOKP/external_chromium_org | tools/telemetry/build/update_docs.py | 103 | 4616 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import pkgutil
import pydoc
import re
import sys
import telemetry
from telemetry.core import util
telemetry_dir = util.GetTelemetryDir()
docs_dir = os.path.join(telemetry_dir, 'docs')
def RemoveAllDocs():
  """Deletes every file under docs_dir (directories are left in place)."""
  for dirpath, _, names in os.walk(docs_dir):
    for name in names:
      os.remove(os.path.join(dirpath, name))
def GenerateHTMLForModule(module):
  """Returns the pydoc HTML page for |module| with file links made relative.

  Assumes the caller's cwd is telemetry_dir so pydoc emits predictable
  paths (see Main).
  """
  html = pydoc.html.page(pydoc.describe(module),
                         pydoc.html.document(module, module.__name__))
  # pydoc writes out html with links in a variety of funky ways. We need
  # to fix them up.
  assert not telemetry_dir.endswith(os.sep)
  links = re.findall('(<a href="(.+?)">(.+?)</a>)', html)
  for link_match in links:
    link, href, link_text = link_match
    if not href.startswith('file:'):
      continue
    # Rewrite absolute file: URLs into paths relative to the docs directory,
    # normalizing path separators for the web.
    new_href = href.replace('file:', '')
    new_href = new_href.replace(telemetry_dir, os.pardir)
    new_href = new_href.replace(os.sep, '/')
    new_link_text = link_text.replace(telemetry_dir + os.sep, '')
    new_link = '<a href="%s">%s</a>' % (new_href, new_link_text)
    html = html.replace(link, new_link)
  # pydoc writes out html with absolute path file links. This is not suitable
  # for checked in documentation. So, fix up the HTML after it is generated.
  #html = re.sub('href="file:%s' % telemetry_dir, 'href="..', html)
  #html = re.sub(telemetry_dir + os.sep, '', html)
  return html
def WriteHTMLForModule(module):
  """Generates the HTML page for |module| and writes it into docs_dir."""
  html = GenerateHTMLForModule(module)
  out_path = os.path.join(docs_dir, '%s.html' % module.__name__)
  with open(out_path, 'w') as out_file:
    sys.stderr.write('Wrote %s\n' % os.path.relpath(out_path))
    out_file.write(html)
def GetAllModulesToDocument(module):
  """Returns |module| plus every importable, documentable submodule of it.

  Unit-test modules and orphan .pyc files (no matching .py) are skipped.
  """
  result = [module]
  for _, modname, _ in pkgutil.walk_packages(
      module.__path__, module.__name__ + '.'):
    if modname.endswith('_unittest'):
      logging.debug("skipping %s due to being a unittest", modname)
      continue
    submodule = __import__(modname, fromlist=[""])
    source_stem, _ = os.path.splitext(submodule.__file__)
    if not os.path.exists(source_stem + '.py'):
      logging.info("skipping %s due to being an orphan .pyc",
                   submodule.__file__)
      continue
    result.append(submodule)
  return result
class AlreadyDocumentedModule(object):
  """A previously generated documentation page sitting on disk."""

  def __init__(self, filename):
    self.filename = filename

  @property
  def name(self):
    """Module name derived from the file name (extension stripped)."""
    stem, _ = os.path.splitext(os.path.basename(self.filename))
    return stem

  @property
  def contents(self):
    """The page's current on-disk HTML."""
    with open(self.filename, 'r') as doc_file:
      return doc_file.read()
def GetAlreadyDocumentedModules():
  """Returns an AlreadyDocumentedModule for every file under docs_dir."""
  found = []
  for dirname, _, filenames in os.walk(docs_dir):
    found.extend(
        AlreadyDocumentedModule(os.path.join(dirname, filename))
        for filename in filenames)
  return found
def IsUpdateDocsNeeded():
  """Returns True when the checked-in docs differ from freshly generated ones."""
  documented = GetAlreadyDocumentedModules()
  documented_by_name = dict((m.name, m) for m in documented)
  current_modules = GetAllModulesToDocument(telemetry)
  # Quick check: if the set of module names changed, we definitely need
  # an update.
  if set(m.__name__ for m in current_modules) != set(documented_by_name):
    return True
  # Otherwise regenerate each page and compare with what is on disk; any
  # difference means an update is needed.
  for module in current_modules:
    on_disk = documented_by_name[module.__name__]
    if GenerateHTMLForModule(module) != on_disk.contents:
      return True
  return False
def Main(args):
  """Regenerates the telemetry HTML docs under docs/.

  Args:
    args: command-line arguments; supports a repeatable -v/--verbose flag.
  """
  parser = optparse.OptionParser()
  # default=0 keeps the '>= 2' comparison below valid when no -v is given
  # (None >= 2 raises TypeError on Python 3) without changing the counted
  # values: the first -v still yields 1, and 0 is falsy like None was.
  parser.add_option(
      '-v', '--verbose', action='count', dest='verbosity', default=0,
      help='Increase verbosity level (repeat as needed)')
  options, args = parser.parse_args(args)
  if options.verbosity >= 2:
    logging.getLogger().setLevel(logging.DEBUG)
  elif options.verbosity:
    logging.getLogger().setLevel(logging.INFO)
  else:
    logging.getLogger().setLevel(logging.WARNING)

  assert os.path.isdir(docs_dir)
  RemoveAllDocs()

  # Generate from inside the telemetry dir so pydoc emits stable relative
  # links (see GenerateHTMLForModule); always restore the original cwd.
  old_cwd = os.getcwd()
  try:
    os.chdir(telemetry_dir)
    for module in GetAllModulesToDocument(telemetry):
      WriteHTMLForModule(module)
  finally:
    os.chdir(old_cwd)
| bsd-3-clause |
tkelman/utf8rewind | tools/gyp/test/sibling/gyptest-relocate.py | 100 | 1365 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import TestGyp

test = TestGyp.TestGyp()

# The xcode-ninja generator handles gypfiles which are not at the
# project root incorrectly.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
  test.skip_test()

# Generate and build from the original location, then move the whole tree
# and rebuild: the generated files must not contain absolute paths.
test.run_gyp('build/all.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('build/all.gyp', test.ALL, chdir='relocate/src')
chdir = 'relocate/src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format in ('make', 'ninja', 'cmake'):
  chdir = 'relocate/src'
# Each generator leaves built binaries in a different place; run both
# programs from the relocated tree and check their output.
if test.format == 'xcode':
  chdir = 'relocate/src/prog1'
test.run_built_executable('program1',
                          chdir=chdir,
                          stdout="Hello from prog1.c\n")
if test.format == 'xcode':
  chdir = 'relocate/src/prog2'
test.run_built_executable('program2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")
test.pass_test()
| mit |
theguardian/JIRA-APPy | lib/tlslite/integration/pop3_tls.py | 1 | 3161 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + poplib."""
import socket
from poplib import POP3, POP3_SSL_PORT
from lib.tlslite.tlsconnection import TLSConnection
from lib.tlslite.integration.clienthelper import ClientHelper
class POP3_TLS(POP3, ClientHelper):
    """This class extends L{poplib.POP3} with TLS support."""
    def __init__(self, host, port = POP3_SSL_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 username=None, password=None,
                 certChain=None, privateKey=None,
                 checker=None,
                 settings=None):
        """Create a new POP3_TLS.
        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - certChain, privateKey (certificate)
        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        you can do certificate-based server
        authentication with one of these argument combinations:
         - x509Fingerprint
        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.
        The caller should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.
        @type host: str
        @param host: Server to connect to.
        @type port: int
        @param port: Port to connect to.
        @type username: str
        @param username: SRP username.
        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.
        @type certChain: L{tlslite.x509certchain.X509CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP argument.
        @type privateKey: L{tlslite.utils.rsakey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP argument.
        @type checker: L{tlslite.checker.Checker}
        @param checker: Callable object called after handshaking to
        evaluate the connection and raise an Exception if necessary.
        @type settings: L{tlslite.handshakesettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        self.host = host
        self.port = port
        sock = socket.create_connection((host, port), timeout)
        ClientHelper.__init__(self,
                              username, password,
                              certChain, privateKey,
                              checker,
                              settings)
        # Wrap the raw socket in TLS and handshake before any POP3 traffic
        # is exchanged (implicit TLS, as on the dedicated POP3S port).
        connection = TLSConnection(sock)
        ClientHelper._handshake(self, connection)
        # Redo poplib.POP3.__init__'s socket setup over the TLS connection
        # (we deliberately skipped the parent constructor, which would have
        # opened a plaintext socket itself).
        self.sock = connection
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        self.welcome = self._getresp()
| gpl-2.0 |
lemontree-testing/python_training | generator/contact.py | 1 | 1376 | __author__ = 'lemontree'
from model.contact import Contacts
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # The getopt module has no usage() helper -- the old getopt.usage() call
    # here raised AttributeError on any bad option. Report the parse error
    # and exit with the conventional usage-error status instead.
    print(err)
    sys.exit(2)

# Defaults: 5 contacts, written to data/contacts.json (relative to the
# project root; see the path construction below).
n = 5
f = "data/contacts.json"

for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return prefix plus up to maxlen-1 random letters/digits/punctuation.

    Spaces are over-weighted in the alphabet so they appear frequently.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation + " " * 10
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
# Test data: one all-empty contact plus n randomly filled ones.
# Bug fix: the list previously used range(5), silently ignoring the -n
# option parsed above; it now honors n.
testdata = [Contacts(name="", lastname="", nickname="", company="", address="",
                     homephone="", email="", year_of_birth="")] + [
    Contacts(name=random_string("Name", 20), lastname=random_string("LastName", 30),
             nickname=random_string("Nickname", 40), company=random_string("Company", 30),
             address=random_string("Addr", 100), homephone=random_string("home", 10),
             email=random_string("mail1", 20), year_of_birth=random.randrange(0, 1000))
    for i in range(n)]

# Output path is resolved relative to this script's parent directory so the
# generator works regardless of the current working directory.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
mozilla/verbatim | vendor/lib/python/translate/filters/test_prefilters.py | 33 | 1123 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import prefilters
def test_removekdecomments():
    """removekdecomments must strip KDE-style '_: ...' comment headers and
    leave ordinary strings untouched."""
    assert prefilters.removekdecomments(u"Some sṱring") == u"Some sṱring"
    assert prefilters.removekdecomments(u"_: Commenṱ\\n\nSome sṱring") == u"Some sṱring"
    # A string that is nothing but a KDE comment collapses to empty.
    assert prefilters.removekdecomments(u"_: Commenṱ\\n\n") == u""
def test_filterwordswithpunctuation():
    """filterwordswithpunctuation must strip apostrophes from listed words
    and words containing them, leaving other text unchanged."""
    string = u"Nothing in here."
    filtered = prefilters.filterwordswithpunctuation(string)
    assert filtered == string
    # test listed words (start / end with apostrophe)
    string = u"'n Boom het 'n tak."
    filtered = prefilters.filterwordswithpunctuation(string)
    assert filtered == "n Boom het n tak."
    # test words containing apostrophe
    string = u"It's in it's own place."
    filtered = prefilters.filterwordswithpunctuation(string)
    assert filtered == "Its in its own place."
    # test strings in unicode
    string = u"Iṱ'š"
    filtered = prefilters.filterwordswithpunctuation(string)
    assert filtered == u"Iṱš"
| gpl-2.0 |
florian-dacosta/OpenUpgrade | addons/point_of_sale/report/account_statement.py | 380 | 2031 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_statement(report_sxw.rml_parse):
    """RML parser for the point-of-sale bank statement report.

    Exposes ``get_data`` (statement lines) and ``get_total`` (sum of line
    amounts) to the report template via ``localcontext``.
    """
    def __init__(self, cr, uid, name, context):
        super(account_statement, self).__init__(cr, uid, name, context=context)
        self.total = 0.0
        self.localcontext.update({
            'time': time,
            'get_total': self._get_total,
            'get_data': self._get_data,
        })

    def _get_data(self, statement):
        # Return the statement's lines as a plain list for the template
        # (replaces a manual append loop with the equivalent list() call).
        return list(statement.line_ids)

    def _get_total(self, statement_line_ids):
        # Sum line amounts; the 0.0 start value keeps the result a float
        # (and 0.0 for an empty statement), matching the old accumulator.
        return sum((line.amount for line in statement_line_ids), 0.0)
class report_account_statement(osv.AbstractModel):
    """Registers the QWeb statement report and wraps the legacy RML parser."""
    _name = 'report.point_of_sale.report_statement'
    _inherit = 'report.abstract_report'
    # QWeb template rendered for this report.
    _template = 'point_of_sale.report_statement'
    # Legacy rml_parse subclass whose helpers the template uses.
    _wrapped_report_class = account_statement
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
matthiasdiener/spack | var/spack/repos/builtin/packages/vegas2/package.py | 5 | 2039 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Vegas2(Package):
    """VEGAS2 is an extension that uses 1,000 Genomes data to model SNP
    correlations across the autosomes and chromosome X."""
    homepage = "https://vegas2.qimrberghofer.edu.au/"
    url = "https://vegas2.qimrberghofer.edu.au/vegas2v2"
    # The download is a single executable script, so do not try to unpack it.
    version('2', '815d80b86e9e294f99332bb5181e897a', expand=False)
    # Runtime interpreters and tools the vegas2 driver script invokes.
    depends_on('perl', type='run')
    depends_on('r', type='run')
    depends_on('plink')
    depends_on('r-mvtnorm', type='run')
    depends_on('r-corpcor', type='run')
    def url_for_version(self, version):
        # Upstream names releases vegas2v<N> instead of using a path suffix.
        url = 'https://vegas2.qimrberghofer.edu.au/vegas2v{0}'
        return url.format(version)
    def install(self, spec, prefix):
        # No build step: just copy the versioned script into bin/.
        mkdirp(prefix.bin)
        install('vegas2v{0}'.format(self.version), prefix.bin)
| lgpl-2.1 |
thelo-gaultier/itri-disco-ci | nodepool/scripts/cache_git_repos.py | 22 | 2918 | #!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
import shutil
import sys
import urllib2
from common import run_local
URL = ('https://git.openstack.org/cgit/openstack-infra/project-config/'
'plain/gerrit/projects.yaml')
# Matches "- project: <name>" entries in the projects.yaml registry.
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')
# Not using argument-parsing libraries in order to avoid module imports
# that are not available across all python versions
if len(sys.argv) > 1:
    GIT_BASE = sys.argv[1]
else:
    GIT_BASE = 'git://git.openstack.org'
def clone_repo(project):
    """Clones and sanity-checks one git repo under /opt/git.

    Returns a (status, combined output) tuple; status 0 means the clone,
    branch listing and hard reset all succeeded.
    """
    remote = '%s/%s.git' % (GIT_BASE, project)
    # Clear out any existing target directory first, in case of a retry.
    try:
        shutil.rmtree(os.path.join('/opt/git', project))
    except OSError:
        pass
    # Try to clone the requested git repository.
    (status, out) = run_local(['git', 'clone', remote, project],
                              status=True, cwd='/opt/git')
    # If it claims to have worked, make sure we can list branches.
    if status == 0:
        (status, moreout) = run_local(['git', 'branch', '-a'], status=True,
                                      cwd=os.path.join('/opt/git', project))
        out = '\n'.join((out, moreout))
    # If that worked, try resetting to HEAD to make sure it's there.
    if status == 0:
        (status, moreout) = run_local(['git', 'reset', '--hard', 'HEAD'],
                                      status=True,
                                      cwd=os.path.join('/opt/git', project))
        out = '\n'.join((out, moreout))
    # Status of 0 implies all the above worked, 1 means something failed.
    return (status, out)
def main():
    """Fetches the project registry and caches a clone of every project.

    Each clone is retried once before the whole run is aborted.
    """
    # TODO(jeblair): use gerrit rest api when available
    data = urllib2.urlopen(URL).read()
    for line in data.split('\n'):
        # We're regex-parsing YAML so that we don't have to depend on the
        # YAML module which is not in the stdlib.
        m = PROJECT_RE.match(line)
        if m:
            (status, out) = clone_repo(m.group(1))
            print out
            if status != 0:
                # One retry before giving up on the whole cache build.
                print 'Retrying to clone %s' % m.group(1)
                (status, out) = clone_repo(m.group(1))
                print out
                if status != 0:
                    raise Exception('Failed to clone %s' % m.group(1))


if __name__ == '__main__':
    main()
| apache-2.0 |
rednach/krill | test/test_template_loop.py | 15 | 1160 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestTemplateLoop(ShinkenTest):
    """A config whose templates form an inheritance loop must be reported
    as invalid rather than crashing or hanging the loader."""
    def setUp(self):
        self.setup_with_file('etc/shinken_template_loop.cfg')
    def test_dummy(self):
        # The template loop must make the whole configuration invalid.
        self.assertFalse(self.conf.conf_is_correct)

if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
msabramo/ansible | lib/ansible/modules/windows/win_command.py | 6 | 4268 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity/support metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
options:
free_form:
description:
- the win_command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
creates:
description:
- a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
removes:
description:
- a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
chdir:
description:
- set the specified path as the current working directory before executing a command
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author:
- Matt Davis
'''
EXAMPLES = r'''
# Example from Ansible Playbooks.
- win_command: whoami
register: whoami_out
# Run the command only if the specified file does not exist.
- win_command: wbadmin -backupTarget:C:\backup\ creates=C:\backup\
# You can also use the 'args' form to provide the options. This command
# will change the working directory to C:\somedir\\ and will only run when
# C:\backup\ doesn't exist.
- win_command: wbadmin -backupTarget:C:\backup\ creates=C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list of strings
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
francisco-dlp/hyperspy | hyperspy/tests/component/test_erf.py | 3 | 1327 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from numpy.testing import assert_allclose
import pytest
from distutils.version import LooseVersion
import sympy
from hyperspy.components1d import Erf
pytestmark = pytest.mark.skipif(LooseVersion(sympy.__version__) <
LooseVersion("1.3"),
reason="This test requires SymPy >= 1.3")
def test_function():
    """Spot-checks Erf component values at the origin and in the tail."""
    g = Erf()
    g.A.value = 1
    g.sigma.value = 2
    g.origin.value = 3
    # At x == origin the (odd) error function is exactly zero.
    assert g.function(3) == 0.
    # Far from the origin the component saturates at A/2.
    assert_allclose(g.function(15),0.5)
    assert_allclose(g.function(1.951198),-0.2,rtol=1e-6)
| gpl-3.0 |
jboeuf/grpc | src/python/grpcio_testing/grpc_testing/_server/_rpc.py | 19 | 5233 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import grpc
from grpc_testing import _common
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
class Rpc(object):
    """Server-side state of one in-progress RPC under grpc_testing.

    All mutable state is guarded by ``self._condition``: the public methods
    acquire it, while the ``_``-prefixed helpers assume the caller already
    holds it.
    """

    def __init__(self, handler, invocation_metadata):
        self._condition = threading.Condition()
        self._handler = handler
        self._invocation_metadata = invocation_metadata
        self._initial_metadata_sent = False
        self._pending_trailing_metadata = None
        self._pending_code = None
        self._pending_details = None
        self._callbacks = []
        self._active = True
        self._rpc_errors = []

    def _ensure_initial_metadata_sent(self):
        # Initial metadata may be sent at most once per RPC.
        if not self._initial_metadata_sent:
            self._handler.send_initial_metadata(_common.FUSSED_EMPTY_METADATA)
            self._initial_metadata_sent = True

    def _call_back(self):
        # Snapshot and clear the registered callbacks; the None sentinel lets
        # add_callback refuse registrations after termination.
        callbacks = tuple(self._callbacks)
        self._callbacks = None

        def call_back():
            for callback in callbacks:
                try:
                    callback()
                except Exception:  # pylint: disable=broad-except
                    _LOGGER.exception('Exception calling server-side callback!')

        # Run callbacks on a separate thread so they cannot deadlock on
        # self._condition, which the caller currently holds.
        callback_calling_thread = threading.Thread(target=call_back)
        callback_calling_thread.start()

    def _terminate(self, trailing_metadata, code, details):
        # Idempotent: only the first termination reaches the handler, but
        # waiters are always woken.
        if self._active:
            self._active = False
            self._handler.send_termination(trailing_metadata, code, details)
            self._call_back()
        self._condition.notify_all()

    def _complete(self):
        # Substitute defaults (empty metadata, OK, '') for anything the
        # application did not stage before completion.
        if self._pending_trailing_metadata is None:
            trailing_metadata = _common.FUSSED_EMPTY_METADATA
        else:
            trailing_metadata = self._pending_trailing_metadata
        if self._pending_code is None:
            code = grpc.StatusCode.OK
        else:
            code = self._pending_code
        details = '' if self._pending_details is None else self._pending_details
        self._terminate(trailing_metadata, code, details)

    def _abort(self, code, details):
        self._terminate(_common.FUSSED_EMPTY_METADATA, code, details)

    def add_rpc_error(self, rpc_error):
        """Record an RpcError injected into the servicer code."""
        with self._condition:
            self._rpc_errors.append(rpc_error)

    def application_cancel(self):
        """Terminate the RPC as cancelled by the server-side application."""
        with self._condition:
            self._abort(grpc.StatusCode.CANCELLED,
                        'Cancelled by server-side application!')

    def application_exception_abort(self, exception):
        """Terminate the RPC after the application raised ``exception``."""
        with self._condition:
            # Errors this fixture injected itself are expected; only log
            # exceptions that originated in the application.
            if exception not in self._rpc_errors:
                _LOGGER.exception('Exception calling application!')
            self._abort(
                grpc.StatusCode.UNKNOWN,
                'Exception calling application: {}'.format(exception))

    def extrinsic_abort(self):
        """Mark the RPC terminated from outside; handler is NOT notified."""
        with self._condition:
            if self._active:
                self._active = False
                self._call_back()
                self._condition.notify_all()

    def unary_response_complete(self, response):
        """Deliver the single response and terminate the RPC."""
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._handler.add_response(response)
            self._complete()

    def stream_response(self, response):
        """Deliver one streamed response without terminating the RPC."""
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._handler.add_response(response)

    def stream_response_complete(self):
        """Terminate a streaming RPC after its final response."""
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._complete()

    def send_initial_metadata(self, initial_metadata):
        """Send explicit initial metadata; return False if already sent."""
        with self._condition:
            if self._initial_metadata_sent:
                return False
            else:
                self._handler.send_initial_metadata(initial_metadata)
                self._initial_metadata_sent = True
                return True

    def is_active(self):
        """Return True while the RPC has not terminated."""
        with self._condition:
            return self._active

    def add_callback(self, callback):
        """Register a termination callback; return False if already terminated."""
        with self._condition:
            if self._callbacks is None:
                return False
            else:
                self._callbacks.append(callback)
                return True

    def invocation_metadata(self):
        """Return the metadata sent by the client at invocation."""
        with self._condition:
            return self._invocation_metadata

    def set_trailing_metadata(self, trailing_metadata):
        """Stage trailing metadata to send at termination."""
        with self._condition:
            self._pending_trailing_metadata = trailing_metadata

    def set_code(self, code):
        """Stage the status code to send at termination."""
        with self._condition:
            self._pending_code = code

    def set_details(self, details):
        """Stage the status details string to send at termination."""
        with self._condition:
            self._pending_details = details
| apache-2.0 |
maxhome1/l10n-italy | l10n_it_withholding_tax/__init__.py | 11 | 1133 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2012-2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
| agpl-3.0 |
TheChymera/chr-helpers | chr_matplotlib.py | 1 | 2908 | __author__="Paul H, Horea Christian, Leonor Carcia Gutierrez"
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def auto_remap(data):
    """Compute (start, midpoint, stop) offsets that center a colormap at zero.

    Parameters
    ----------
    data : np.ndarray
        Array spanning negative and positive values (NaNs are ignored).

    Returns
    -------
    (start, midpoint, stop) : tuple of float
        Offsets suitable for passing to ``remappedColorMap``.

    Raises
    ------
    ValueError
        If the data is entirely non-negative or entirely non-positive,
        in which case centering at zero is meaningless.
    """
    vmin = np.nanmin(data)
    vmax = np.nanmax(data)
    if vmin >= 0:
        raise ValueError('You do not need to rescale your cmap to center zero.')
    # Bug fix: all-negative data previously fell through to the vmax < |vmin|
    # branch and produced midpoint = |vmin|/(vmax+|vmin|) > 1, which is not a
    # valid colormap coordinate. Reject it symmetrically instead.
    if vmax <= 0:
        raise ValueError('You do not need to rescale your cmap to center zero.')
    if vmax > abs(vmin):
        start = (vmax - abs(vmin)) / (2 * vmax)
        midpoint = abs(vmin) / (vmax + abs(vmin))
        stop = 1.0
    elif vmax == abs(vmin):
        start, midpoint, stop = 0, 0.5, 1.0
    else:
        start = 0
        midpoint = abs(vmin) / (vmax + abs(vmin))
        stop = (abs(vmin) - vmax) / (2 * abs(vmin))
    return start, midpoint, stop
def remappedColorMap(cmap, data=False, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    '''
    Build a new colormap whose center is shifted to ``midpoint`` and whose
    dynamic range is rescaled to [start, stop] of the source map. Useful for
    data with a negative minimum and positive maximum where the middle of the
    colormap should sit at zero.

    Input
    -----
      cmap : The matplotlib colormap to be altered
      data: You can provide your data as a numpy array, and the
          start/midpoint/stop values will then be computed automatically
          via ``auto_remap``.
      start : Offset from the lowest point in the colormap's range.
          Defaults to 0.0 (no lower offset). Should be between 0.0 and 0.5;
          if your dataset vmax <= abs(vmin) leave this at 0.0, otherwise
          use (vmax-abs(vmin))/(2*vmax)
      midpoint : The new center of the colormap. Defaults to 0.5 (no shift).
          Should be between 0.0 and 1.0; usually abs(vmin)/(vmax+abs(vmin))
      stop : Offset from the highest point in the colormap's range.
          Defaults to 1.0 (no upper offset). Should be between 0.5 and 1.0;
          if your dataset vmax >= abs(vmin) leave this at 1.0, otherwise
          use (abs(vmin)-vmax)/(2*abs(vmin))
    '''
    if isinstance(data, np.ndarray):
        start, midpoint, stop = auto_remap(data)

    # Positions at which the source colormap is sampled (dense around 0.5)...
    source_positions = np.hstack([
        np.linspace(start, 0.5, 128, endpoint=False),
        np.linspace(0.5, stop, 129)
    ])
    # ...and the positions those samples occupy in the remapped colormap.
    target_positions = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129)
    ])

    cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
    for src, dst in zip(source_positions, target_positions):
        r, g, b, a = cmap(src)
        cdict['red'].append((dst, r, r))
        cdict['green'].append((dst, g, g))
        cdict['blue'].append((dst, b, b))
        cdict['alpha'].append((dst, a, a))

    newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
| gpl-3.0 |
dqnykamp/sympy | sympy/matrices/matrices.py | 1 | 127921 | from __future__ import print_function, division
import collections
from sympy.core.add import Add
from sympy.core.basic import Basic, C, Atom
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.logic import _fuzzy_group_inverse, fuzzy_and
from sympy.core.power import Pow
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.numbers import Integer, ilcm, Rational, Float
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, default_sort_key, xrange, NotIterable
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.functions import exp, factorial
from sympy.printing import sstr
from sympy.core.compatibility import reduce, as_int, string_types
from sympy.utilities.exceptions import SymPyDeprecationWarning
from types import FunctionType
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class MatrixError(Exception):
    """Base class for all matrix-related errors raised by this module."""
    pass
class ShapeError(ValueError, MatrixError):
    """Wrong matrix shape: operand dimensions are incompatible."""
    pass
class NonSquareMatrixError(ShapeError):
    """Raised when an operation requiring a square matrix gets a non-square one."""
    pass
class DeferredVector(Symbol, NotIterable):
    """A vector whose components are deferred (e.g. for use with lambdify)

    Examples
    ========

    >>> from sympy import DeferredVector, lambdify
    >>> X = DeferredVector( 'X' )
    >>> X
    X
    >>> expr = (X[0] + 2, X[2] + 3)
    >>> func = lambdify( X, expr)
    >>> func( [1, 2, 3] )
    (3, 6)
    """

    def __getitem__(self, i):
        # Normalize negative zero, then reject genuinely negative indices.
        if i == -0:
            i = 0
        if i < 0:
            raise IndexError('DeferredVector index out of range')
        # Each component is itself a Symbol named like "X[3]".
        return Symbol('%s[%d]' % (self.name, i))

    def __str__(self):
        return sstr(self)

    def __repr__(self):
        return "DeferredVector('%s')" % (self.name)
class MatrixBase(object):

    # Added just for numpy compatibility: a high priority makes numpy defer
    # mixed ndarray/Matrix binary operations to the Matrix reflected methods.
    __array_priority__ = 11

    is_Matrix = True
    is_Identity = None
    _class_priority = 3
    _sympify = staticmethod(sympify)

    __hash__ = None  # Mutable, therefore unhashable
    @classmethod
    def _handle_creation_inputs(cls, *args, **kwargs):
        """Return the number of rows, cols and flat matrix elements.

        Examples
        ========

        >>> from sympy import Matrix, I

        Matrix can be constructed as follows:

        * from a nested list of iterables

        >>> Matrix( ((1, 2+I), (3, 4)) )
        Matrix([
        [1, 2 + I],
        [3, 4]])

        * from un-nested iterable (interpreted as a column)

        >>> Matrix( [1, 2] )
        Matrix([
        [1],
        [2]])

        * from un-nested iterable with dimensions

        >>> Matrix(1, 2, [1, 2] )
        Matrix([[1, 2]])

        * from no arguments (a 0 x 0 matrix)

        >>> Matrix()
        Matrix(0, 0, [])

        * from a rule

        >>> Matrix(2, 2, lambda i, j: i/(j + 1) )
        Matrix([
        [0, 0],
        [1, 1/2]])

        """
        from sympy.matrices.sparse import SparseMatrix

        # The cases below are dispatched on argument count and type; the
        # first matching branch wins.

        # Matrix(SparseMatrix(...))
        if len(args) == 1 and isinstance(args[0], SparseMatrix):
            return args[0].rows, args[0].cols, flatten(args[0].tolist())

        # Matrix(Matrix(...))
        if len(args) == 1 and isinstance(args[0], MatrixBase):
            return args[0].rows, args[0].cols, args[0]._mat

        # Matrix(MatrixSymbol('X', 2, 2))
        if len(args) == 1 and isinstance(args[0], Basic) and args[0].is_Matrix:
            return args[0].rows, args[0].cols, args[0].as_explicit()._mat

        if len(args) == 3:
            rows = as_int(args[0])
            cols = as_int(args[1])

        # Matrix(2, 2, lambda i, j: i+j)
        if len(args) == 3 and isinstance(args[2], collections.Callable):
            operation = args[2]
            flat_list = []
            for i in range(rows):
                flat_list.extend([cls._sympify(operation(cls._sympify(i), j))
                    for j in range(cols)])

        # Matrix(2, 2, [1, 2, 3, 4])
        elif len(args) == 3 and is_sequence(args[2]):
            flat_list = args[2]
            if len(flat_list) != rows*cols:
                raise ValueError('List length should be equal to rows*columns')
            flat_list = [cls._sympify(i) for i in flat_list]

        # Matrix(numpy.ones((2, 2)))
        elif len(args) == 1 and hasattr(args[0], "__array__"):
            # NumPy array or matrix or some other object that implements
            # __array__. So let's first use this method to get a
            # numpy.array() and then make a python list out of it.
            arr = args[0].__array__()
            if len(arr.shape) == 2:
                rows, cols = arr.shape[0], arr.shape[1]
                flat_list = [cls._sympify(i) for i in arr.ravel()]
                return rows, cols, flat_list
            elif len(arr.shape) == 1:
                # 1-D arrays become column vectors.
                rows, cols = arr.shape[0], 1
                flat_list = [S.Zero]*rows
                for i in range(len(arr)):
                    flat_list[i] = cls._sympify(arr[i])
                return rows, cols, flat_list
            else:
                raise NotImplementedError(
                    "SymPy supports just 1D and 2D matrices")

        # Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]])
        elif len(args) == 1 and is_sequence(args[0])\
                and not isinstance(args[0], DeferredVector):
            in_mat = []
            ncol = set()
            for row in args[0]:
                if isinstance(row, MatrixBase):
                    # Embedded matrices contribute their rows in place.
                    in_mat.extend(row.tolist())
                    if row.cols or row.rows:  # only pay attention if it's not 0x0
                        ncol.add(row.cols)
                else:
                    in_mat.append(row)
                    try:
                        ncol.add(len(row))
                    except TypeError:
                        ncol.add(1)
            if len(ncol) > 1:
                raise ValueError("Got rows of variable lengths: %s" %
                    sorted(list(ncol)))
            cols = ncol.pop() if ncol else 0
            rows = len(in_mat) if cols else 0
            if rows:
                if not is_sequence(in_mat[0]):
                    cols = 1
                    flat_list = [cls._sympify(i) for i in in_mat]
                    return rows, cols, flat_list
            flat_list = []
            for j in range(rows):
                for i in range(cols):
                    flat_list.append(cls._sympify(in_mat[j][i]))

        # Matrix()
        elif len(args) == 0:
            # Empty Matrix
            rows = cols = 0
            flat_list = []

        else:
            raise TypeError("Data type not understood")

        return rows, cols, flat_list
    def _setitem(self, key, value):
        """Helper to set value at location given by key.

        Examples
        ========

        >>> from sympy import Matrix, I, zeros, ones
        >>> m = Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3, 4]])
        >>> m[1, 0] = 9
        >>> m
        Matrix([
        [1, 2 + I],
        [9, 4]])
        >>> m[1, 0] = [[0, 1]]

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = zeros(4)
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2; M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4; M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        from .dense import Matrix

        is_slice = isinstance(key, slice)
        i, j = key = self.key2ij(key)
        is_mat = isinstance(value, MatrixBase)
        if type(i) is slice or type(j) is slice:
            # Sliced assignment: the value must itself be a matrix or a
            # (nested) sequence covering the slice.
            if is_mat:
                self.copyin_matrix(key, value)
                return
            if not isinstance(value, Expr) and is_sequence(value):
                self.copyin_list(key, value)
                return
            raise ValueError('unexpected value: %s' % value)
        else:
            if (not is_mat and
                    not isinstance(value, Basic) and is_sequence(value)):
                # Promote a raw sequence to a Matrix so it can be copied in.
                value = Matrix(value)
                is_mat = True
            if is_mat:
                if is_slice:
                    key = (slice(*divmod(i, self.cols)),
                           slice(*divmod(j, self.cols)))
                else:
                    key = (slice(i, i + value.rows),
                           slice(j, j + value.cols))
                self.copyin_matrix(key, value)
            else:
                # Scalar assignment: hand back the canonical (i, j, value)
                # triple for the caller to store.
                return i, j, self._sympify(value)
            return
    def copy(self):
        """Return a shallow copy of self (same class, same flat elements)."""
        return self._new(self.rows, self.cols, self._mat)

    def trace(self):
        """Return the trace (sum of diagonal entries); square matrices only."""
        if not self.is_square:
            raise NonSquareMatrixError()
        return self._eval_trace()
    def inv(self, method=None, **kwargs):
        """Return the inverse of self.

        ``method`` selects the inversion algorithm (see ``_eval_inverse``
        in the concrete subclass for the accepted values).
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        if method is not None:
            kwargs['method'] = method
        return self._eval_inverse(**kwargs)
    def inv_mod(self, m):
        r"""
        Returns the inverse of the matrix `K` (mod `m`), if it exists.

        Method to find the matrix inverse of `K` (mod `m`) implemented in this function:

        * Compute `\mathrm{adj}(K) = \mathrm{cof}(K)^t`, the adjoint matrix of `K`.

        * Compute `r = 1/\mathrm{det}(K) \pmod m`.

        * `K^{-1} = r\cdot \mathrm{adj}(K) \pmod m`.

        Examples
        ========

        >>> from sympy import Matrix
        >>> A = Matrix(2, 2, [1, 2, 3, 4])
        >>> A.inv_mod(5)
        Matrix([
        [3, 1],
        [4, 2]])
        >>> A.inv_mod(3)
        Matrix([
        [1, 1],
        [0, 1]])

        """
        from sympy.ntheory import totient
        if not self.is_square:
            raise NonSquareMatrixError()
        N = self.cols
        phi = totient(m)
        det_K = self.det()
        if gcd(det_K, m) != 1:
            raise ValueError('Matrix is not invertible (mod %d)' % m)
        # By Euler's theorem det^(phi(m)-1) = det^(-1) (mod m) since
        # gcd(det, m) == 1 was verified above.
        det_inv = pow(int(det_K), int(phi - 1), int(m))
        K_adj = self.cofactorMatrix().transpose()
        K_inv = self.__class__(N, N, [det_inv*K_adj[i, j] % m for i in range(N) for j in range(N)])
        return K_inv
    def transpose(self):
        """Return the transpose of self."""
        return self._eval_transpose()

    T = property(transpose, None, None, "Matrix transposition.")

    def conjugate(self):
        """Return the elementwise complex conjugate of self."""
        return self._eval_conjugate()

    C = property(conjugate, None, None, "By-element conjugation.")

    def adjoint(self):
        """Conjugate transpose or Hermitian conjugation."""
        return self.T.C
    @property
    def H(self):
        """Return Hermite conjugate.

        Examples
        ========

        >>> from sympy import Matrix, I
        >>> m = Matrix((0, 1 + I, 2, 3))
        >>> m
        Matrix([
        [ 0],
        [1 + I],
        [ 2],
        [ 3]])
        >>> m.H
        Matrix([[0, 1 - I, 2, 3]])

        See Also
        ========

        conjugate: By-element conjugation
        D: Dirac conjugation
        """
        return self.T.C
    @property
    def D(self):
        """Return Dirac conjugate (if self.rows == 4).

        Examples
        ========

        >>> from sympy import Matrix, I, eye
        >>> m = Matrix((0, 1 + I, 2, 3))
        >>> m.D
        Matrix([[0, 1 - I, -2, -3]])
        >>> m = (eye(4) + I*eye(4))
        >>> m[0, 3] = 2
        >>> m.D
        Matrix([
        [1 - I, 0, 0, 0],
        [ 0, 1 - I, 0, 0],
        [ 0, 0, -1 + I, 0],
        [ 2, 0, 0, -1 + I]])

        If the matrix does not have 4 rows an AttributeError will be raised
        because this property is only defined for matrices with 4 rows.

        >>> Matrix(eye(2)).D
        Traceback (most recent call last):
        ...
        AttributeError: Matrix has no attribute D.

        See Also
        ========

        conjugate: By-element conjugation
        H: Hermite conjugation
        """
        from sympy.physics.matrices import mgamma
        if self.rows != 4:
            # In Python 3.2, properties can only return an AttributeError
            # so we can't raise a ShapeError -- see commit which added the
            # first line of this inline comment. Also, there is no need
            # for a message since MatrixBase will raise the AttributeError
            raise AttributeError
        return self.H*mgamma(0)
    def __array__(self):
        """NumPy interoperability hook: coerce self to a numpy array."""
        from .dense import matrix2numpy
        return matrix2numpy(self)

    def __len__(self):
        """Return the number of elements of self.

        Implemented mainly so bool(Matrix()) == False.
        """
        return self.rows*self.cols
    @property
    def shape(self):
        """The shape (dimensions) of the matrix as the 2-tuple (rows, cols).

        Examples
        ========

        >>> from sympy.matrices import zeros
        >>> M = zeros(2, 3)
        >>> M.shape
        (2, 3)
        >>> M.rows
        2
        >>> M.cols
        3
        """
        return (self.rows, self.cols)
    def __sub__(self, a):
        """Return self - a, implemented via __add__ and negation."""
        return self + (-a)

    def __rsub__(self, a):
        """Return a - self, implemented via __add__ and negation."""
        return (-self) + a
    def __mul__(self, other):
        """Return self*other where other is either a scalar or a matrix
        of compatible dimensions.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
        >>> 2*A == A*2 == Matrix([[2, 4, 6], [8, 10, 12]])
        True
        >>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> A*B
        Matrix([
        [30, 36, 42],
        [66, 81, 96]])
        >>> B*A
        Traceback (most recent call last):
        ...
        ShapeError: Matrices size mismatch.
        >>>

        See Also
        ========

        matrix_multiply_elementwise
        """
        if getattr(other, 'is_Matrix', False):
            # The following implmentation is equivalent, but about 5% slower
            #ma, na = A.shape
            #mb, nb = B.shape
            #
            #if na != mb:
            #    raise ShapeError()
            #product = Matrix(ma, nb, lambda i, j: 0)
            #for i in range(ma):
            #    for j in range(nb):
            #        s = 0
            #        for k in range(na):
            #            s += A[i, k]*B[k, j]
            #        product[i, j] = s
            #return product
            A = self
            B = other
            if A.cols != B.rows:
                raise ShapeError("Matrices size mismatch.")
            if A.cols == 0:
                return classof(A, B)._new(A.rows, B.cols, lambda i, j: 0)
            # Transposing B lets each output entry be a dot product of two
            # row lists, avoiding repeated column indexing.
            blst = B.T.tolist()
            alst = A.tolist()
            return classof(A, B)._new(A.rows, B.cols, lambda i, j:
                reduce(lambda k, l: k + l,
                    [a_ik * b_kj for a_ik, b_kj in zip(alst[i], blst[j])]))
        else:
            # Scalar multiplication applied elementwise.
            return self._new(self.rows, self.cols,
                [i*other for i in self._mat])
    def __rmul__(self, a):
        """Return a*self where a is a scalar or a matrix-like object."""
        if getattr(a, 'is_Matrix', False):
            return self._new(a)*self
        return self*a
    def __pow__(self, num):
        """Raise self to the power ``num``.

        Integer powers use binary (square-and-multiply) exponentiation;
        Rational powers require the matrix to be diagonalizable.
        """
        from sympy.matrices import eye

        if not self.is_square:
            raise NonSquareMatrixError()
        if isinstance(num, int) or isinstance(num, Integer):
            n = int(num)
            if n < 0:
                return self.inv()**-n   # A**-2 = (A**-1)**2
            a = eye(self.cols)
            s = self
            # Square-and-multiply: consume one bit of n per iteration.
            while n:
                if n % 2:
                    a *= s
                    n -= 1
                if not n:
                    break
                s *= s
                n //= 2
            return self._new(a)
        elif isinstance(num, Rational):
            try:
                P, D = self.diagonalize()
            except MatrixError:
                raise NotImplementedError(
                    "Implemented only for diagonalizable matrices")
            # A**r = P * D**r * P**-1 with D diagonal, applied entrywise.
            for i in range(D.rows):
                D[i, i] = D[i, i]**num
            return self._new(P*D*P.inv())
        else:
            raise NotImplementedError(
                "Only integer and rational values are supported")
    def __add__(self, other):
        """Return self + other, raising ShapeError if shapes don't match."""
        if getattr(other, 'is_Matrix', False):
            A = self
            B = other
            if A.shape != B.shape:
                raise ShapeError("Matrix size mismatch.")
            alst = A.tolist()
            blst = B.tolist()
            ret = [S.Zero]*A.rows
            for i in range(A.shape[0]):
                ret[i] = list(map(lambda j, k: j + k, alst[i], blst[i]))
            rv = classof(A, B)._new(ret)
            if 0 in A.shape:
                # Degenerate (0 x n / n x 0) inputs lose their shape through
                # tolist(); restore it explicitly.
                rv = rv.reshape(*A.shape)
            return rv
        raise TypeError('cannot add matrix and %s' % type(other))

    def __radd__(self, other):
        """Return other + self (addition is commutative here)."""
        return self + other
    def __div__(self, other):
        """Return self / other as scalar multiplication by 1/other."""
        return self*(S.One / other)

    def __truediv__(self, other):
        """Python 3 division delegates to __div__."""
        return self.__div__(other)

    def __neg__(self):
        """Return -self (elementwise negation via scalar multiplication)."""
        return -1*self
    def multiply(self, b):
        """Returns self*b

        See Also
        ========

        dot
        cross
        multiply_elementwise
        """
        return self*b

    def add(self, b):
        """Return self + b """
        return self + b
    def table(self, printer, rowsep='\n', colsep=', ', align='right'):
        r"""
        String form of Matrix as a table.

        ``printer`` is the printer to use for on the elements (generally
        something like StrPrinter())

        ``rowsep`` is the string used to separate rows (by default a newline).

        ``colsep`` is the string used to separate columns (by default ', ').

        ``align`` defines how the elements are aligned. Must be one of 'left',
        'right', or 'center'. You can also use '<', '>', and '^' to mean the
        same thing, respectively.

        This is used by the string printer for Matrix.

        Examples
        ========

        >>> from sympy import Matrix
        >>> from sympy.printing.str import StrPrinter
        >>> M = Matrix([[1, 2], [-33, 4]])
        >>> printer = StrPrinter()
        >>> M.table(printer)
        '[ 1, 2]\n[-33, 4]'
        >>> print(M.table(printer))
        [ 1, 2]
        [-33, 4]
        >>> print(M.table(printer, rowsep=',\n'))
        [ 1, 2],
        [-33, 4]
        >>> print('[%s]' % M.table(printer, rowsep=',\n'))
        [[ 1, 2],
        [-33, 4]]
        >>> print(M.table(printer, colsep=' '))
        [ 1 2]
        [-33 4]
        >>> print(M.table(printer, align='center'))
        [ 1 , 2]
        [-33, 4]
        """
        # Handle zero dimensions:
        if self.rows == 0 or self.cols == 0:
            return '[]'
        # Build table of string representations of the elements
        res = []
        # Track per-column max lengths for pretty alignment
        maxlen = [0] * self.cols
        for i in range(self.rows):
            res.append([])
            for j in range(self.cols):
                s = printer._print(self[i,j])
                res[-1].append(s)
                maxlen[j] = max(len(s), maxlen[j])
        # Patch strings together
        # Map the user-facing align keyword onto the corresponding str method.
        align = {
            'left': 'ljust',
            'right': 'rjust',
            'center': 'center',
            '<': 'ljust',
            '>': 'rjust',
            '^': 'center',
        }[align]
        for i, row in enumerate(res):
            for j, elem in enumerate(row):
                row[j] = getattr(elem, align)(maxlen[j])
            res[i] = "[" + colsep.join(row) + "]"
        return rowsep.join(res)
    def _format_str(self, printer=None):
        """Return the aligned 'Matrix([...])' string form using ``table``."""
        if not printer:
            from sympy.printing.str import StrPrinter
            printer = StrPrinter()
        # Handle zero dimensions:
        if self.rows == 0 or self.cols == 0:
            return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
        if self.rows == 1:
            return "Matrix([%s])" % self.table(printer, rowsep=',\n')
        return "Matrix([\n%s])" % self.table(printer, rowsep=',\n')

    def __str__(self):
        # Unaligned, compact form based on the nested-list representation.
        if self.rows == 0 or self.cols == 0:
            return 'Matrix(%s, %s, [])' % (self.rows, self.cols)
        return "Matrix(%s)" % str(self.tolist())

    def __repr__(self):
        return sstr(self)
    def cholesky(self):
        """Returns the Cholesky decomposition L of a matrix A
        such that L * L.T = A

        A must be a square, symmetric, positive-definite
        and non-singular matrix.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
        >>> A.cholesky()
        Matrix([
        [ 5, 0, 0],
        [ 3, 3, 0],
        [-1, 1, 3]])
        >>> A.cholesky() * A.cholesky().T
        Matrix([
        [25, 15, -5],
        [15, 18, 0],
        [-5, 0, 11]])

        See Also
        ========

        LDLdecomposition
        LUdecomposition
        QRdecomposition
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if not self.is_symmetric():
            raise ValueError("Matrix must be symmetric.")
        return self._cholesky()
    def LDLdecomposition(self):
        """Returns the LDL Decomposition (L, D) of matrix A,
        such that L * D * L.T == A
        This method eliminates the use of square root.
        Further this ensures that all the diagonal entries of L are 1.
        A must be a square, symmetric, positive-definite
        and non-singular matrix.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
        >>> L, D = A.LDLdecomposition()
        >>> L
        Matrix([
        [ 1, 0, 0],
        [ 3/5, 1, 0],
        [-1/5, 1/3, 1]])
        >>> D
        Matrix([
        [25, 0, 0],
        [ 0, 9, 0],
        [ 0, 0, 9]])
        >>> L * D * L.T * A.inv() == eye(A.rows)
        True

        See Also
        ========

        cholesky
        LUdecomposition
        QRdecomposition
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if not self.is_symmetric():
            raise ValueError("Matrix must be symmetric.")
        return self._LDLdecomposition()
    def lower_triangular_solve(self, rhs):
        """Solves Ax = B, where A is a lower triangular matrix.

        See Also
        ========

        upper_triangular_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        if rhs.rows != self.rows:
            raise ShapeError("Matrices size mismatch.")
        # NOTE(review): this method raises ShapeError/ValueError while
        # upper_triangular_solve raises TypeError for the analogous checks;
        # the inconsistency is kept since callers may catch the old types.
        if not self.is_lower:
            raise ValueError("Matrix must be lower triangular.")
        return self._lower_triangular_solve(rhs)
    def upper_triangular_solve(self, rhs):
        """Solves Ax = B, where A is an upper triangular matrix.

        See Also
        ========

        lower_triangular_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        if not self.is_square:
            raise NonSquareMatrixError("Matrix must be square.")
        # NOTE(review): TypeError here vs ShapeError/ValueError in
        # lower_triangular_solve -- preserved for backward compatibility.
        if rhs.rows != self.rows:
            raise TypeError("Matrix size mismatch.")
        if not self.is_upper:
            raise TypeError("Matrix is not upper triangular.")
        return self._upper_triangular_solve(rhs)
    def cholesky_solve(self, rhs):
        """Solves Ax = B using Cholesky decomposition,
        for a general square non-singular matrix.
        For a non-square matrix with rows > cols,
        the least squares solution is returned.

        See Also
        ========

        lower_triangular_solve
        upper_triangular_solve
        diagonal_solve
        LDLsolve
        LUsolve
        QRsolve
        pinv_solve
        """
        if self.is_symmetric():
            L = self._cholesky()
        elif self.rows >= self.cols:
            # Overdetermined: solve the normal equations (A.T*A) x = A.T*b.
            L = (self.T*self)._cholesky()
            rhs = self.T*rhs
        else:
            raise NotImplementedError("Under-determined System.")
        # Forward then back substitution: L y = rhs, then L.T x = y.
        Y = L._lower_triangular_solve(rhs)
        return (L.T)._upper_triangular_solve(Y)
def diagonal_solve(self, rhs):
"""Solves Ax = B efficiently, where A is a diagonal Matrix,
with non-zero diagonal entries.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> A = eye(2)*2
>>> B = Matrix([[1, 2], [3, 4]])
>>> A.diagonal_solve(B) == B/2
True
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
LDLsolve
LUsolve
QRsolve
pinv_solve
"""
if not self.is_diagonal:
raise TypeError("Matrix should be diagonal")
if rhs.rows != self.rows:
raise TypeError("Size mis-match")
return self._diagonal_solve(rhs)
    def LDLsolve(self, rhs):
        """Solves Ax = B using LDL decomposition,
        for a general square and non-singular matrix.

        For a non-square matrix with rows > cols,
        the least squares solution is returned.

        Examples
        ========

        >>> from sympy.matrices import Matrix, eye
        >>> A = eye(2)*2
        >>> B = Matrix([[1, 2], [3, 4]])
        >>> A.LDLsolve(B) == B/2
        True

        See Also
        ========

        LDLdecomposition
        lower_triangular_solve
        upper_triangular_solve
        cholesky_solve
        diagonal_solve
        LUsolve
        QRsolve
        pinv_solve
        """
        if self.is_symmetric():
            L, D = self.LDLdecomposition()
        elif self.rows >= self.cols:
            # Overdetermined: decompose the normal-equations matrix A.T*A.
            L, D = (self.T*self).LDLdecomposition()
            rhs = self.T*rhs
        else:
            raise NotImplementedError("Under-determined System.")
        # Three triangular/diagonal solves: L y = rhs, D z = y, L.T x = z.
        Y = L._lower_triangular_solve(rhs)
        Z = D._diagonal_solve(Y)
        return (L.T)._upper_triangular_solve(Z)
    def solve_least_squares(self, rhs, method='CH'):
        """Return the least-square fit to the data.

        By default the cholesky_solve routine is used (method='CH'); other
        methods of matrix inversion can be used. To find out which are
        available, see the docstring of the .inv() method.

        Examples
        ========

        >>> from sympy.matrices import Matrix, ones
        >>> A = Matrix([1, 2, 3])
        >>> B = Matrix([2, 3, 4])
        >>> S = Matrix(A.row_join(B))
        >>> S
        Matrix([
        [1, 2],
        [2, 3],
        [3, 4]])

        If each line of S represent coefficients of Ax + By
        and x and y are [2, 3] then S*xy is:

        >>> r = S*Matrix([2, 3]); r
        Matrix([
        [ 8],
        [13],
        [18]])

        But let's add 1 to the middle value and then solve for the
        least-squares value of xy:

        >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
        Matrix([
        [ 5/3],
        [10/3]])

        The error is given by S*xy - r:

        >>> S*xy - r
        Matrix([
        [1/3],
        [1/3],
        [1/3]])
        >>> _.norm().n(2)
        0.58

        If a different xy is used, the norm will be higher:

        >>> xy += ones(2, 1)/10
        >>> (S*xy - r).norm().n(2)
        1.5

        """
        if method == 'CH':
            return self.cholesky_solve(rhs)
        # General path: solve the normal equations (A.T*A) x = A.T*b using
        # the requested inversion method.
        t = self.T
        return (t*self).inv(method=method)*t*rhs
    def solve(self, rhs, method='GE'):
        """Return solution to self*soln = rhs using given inversion method.

        For a list of possible inversion methods, see the .inv() docstring.
        """
        if not self.is_square:
            if self.rows < self.cols:
                raise ValueError('Under-determined system.')
            elif self.rows > self.cols:
                raise ValueError('For over-determined system, M, having '
                    'more rows than columns, try M.solve_least_squares(rhs).')
        else:
            return self.inv(method=method)*rhs
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i, j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
    def extract(self, rowsList, colsList):
        """Return a submatrix by specifying a list of rows and columns.
        Negative indices can be given. All indices must be in the range
        -n <= i < n where n is the number of rows or columns.

        Examples
        ========

        >>> from sympy import Matrix
        >>> m = Matrix(4, 3, range(12))
        >>> m
        Matrix([
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8],
        [9, 10, 11]])
        >>> m.extract([0, 1, 3], [0, 1])
        Matrix([
        [0, 1],
        [3, 4],
        [9, 10]])

        Rows or columns can be repeated:

        >>> m.extract([0, 0, 1], [-1])
        Matrix([
        [2],
        [2],
        [5]])

        Every other row can be taken by using range to provide the indices:

        >>> m.extract(range(0, m.rows, 2), [-1])
        Matrix([
        [2],
        [8]])
        """
        cols = self.cols
        flat_list = self._mat
        # a2idx validates each index and maps negatives into range.
        rowsList = [a2idx(k, self.rows) for k in rowsList]
        colsList = [a2idx(k, self.cols) for k in colsList]
        return self._new(len(rowsList), len(colsList),
                lambda i, j: flat_list[rowsList[i]*cols + colsList[j]])
    def key2bounds(self, keys):
        """Converts a key with potentially mixed types of keys (integer and slice)
        into a tuple of ranges and raises an error if any index is out of self's
        range.

        See Also
        ========

        key2ij
        """
        islice, jslice = [isinstance(k, slice) for k in keys]
        if islice:
            if not self.rows:
                rlo = rhi = 0
            else:
                rlo, rhi = keys[0].indices(self.rows)[:2]
        else:
            # Single integer row: a half-open range of width one.
            rlo = a2idx(keys[0], self.rows)
            rhi = rlo + 1
        if jslice:
            if not self.cols:
                clo = chi = 0
            else:
                clo, chi = keys[1].indices(self.cols)[:2]
        else:
            clo = a2idx(keys[1], self.cols)
            chi = clo + 1
        return rlo, rhi, clo, chi
    def key2ij(self, key):
        """Converts key into canonical form, converting integers or indexable
        items into valid integers for self's range or returning slices
        unchanged.

        See Also
        ========

        key2bounds
        """
        if is_sequence(key):
            if not len(key) == 2:
                raise TypeError('key must be a sequence of length 2')
            return [a2idx(i, n) if not isinstance(i, slice) else i
                for i, n in zip(key, self.shape)]
        elif isinstance(key, slice):
            return key.indices(len(self))[:2]
        else:
            # A single flat index addresses the matrix in row-major order.
            return divmod(a2idx(key, len(self)), self.cols)
    def evalf(self, prec=None, **options):
        """Apply evalf() to each element of self."""
        return self.applyfunc(lambda i: i.evalf(prec, **options))

    # Short alias mirroring Expr.n().
    n = evalf
    def atoms(self, *types):
        """Returns the atoms that form the current object.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> from sympy.matrices import Matrix
        >>> Matrix([[x]])
        Matrix([[x]])
        >>> _.atoms()
        set([x])
        """
        if types:
            # Allow instances to be passed in place of classes.
            types = tuple(
                [t if isinstance(t, type) else type(t) for t in types])
        else:
            types = (Atom,)
        result = set()
        for i in self:
            result.update( i.atoms(*types) )
        return result
@property
def free_symbols(self):
"""Returns the free symbols within the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix([[x], [1]]).free_symbols
set([x])
"""
return set.union(*[i.free_symbols for i in self])
    def subs(self, *args, **kwargs):  # should mirror core.basic.subs
        """Return a new matrix with subs applied to each entry.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> from sympy.matrices import SparseMatrix, Matrix
        >>> SparseMatrix(1, 1, [x])
        Matrix([[x]])
        >>> _.subs(x, y)
        Matrix([[y]])
        >>> Matrix(_).subs(y, x)
        Matrix([[x]])
        """
        return self.applyfunc(lambda x: x.subs(*args, **kwargs))
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""Apply core.function.expand to each entry of the matrix.
Examples
========
>>> from sympy.abc import x
>>> from sympy.matrices import Matrix
>>> Matrix(1, 1, [x*(x+1)])
Matrix([[x*(x + 1)]])
>>> _.expand()
Matrix([[x**2 + x]])
"""
return self.applyfunc(lambda x: x.expand(
deep, modulus, power_base, power_exp, mul, log, multinomial, basic,
**hints))
def simplify(self, ratio=1.7, measure=count_ops):
"""Apply simplify to each element of the matrix.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, cos
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(1, 1, [x*sin(y)**2 + x*cos(y)**2])
Matrix([[x*sin(y)**2 + x*cos(y)**2]])
>>> _.simplify()
Matrix([[x]])
"""
return self.applyfunc(lambda x: x.simplify(ratio, measure))
_eval_simplify = simplify
    def doit(self, **kwargs):
        """Return self.

        Matrix entries are stored explicitly, so there are no unevaluated
        operations to resolve; the standard ``doit`` keyword arguments are
        accepted (and ignored) for interface uniformity with other SymPy
        objects.
        """
        return self
def print_nonzero(self, symb="X"):
"""Shows location of non-zero entries for fast shape lookup.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> m = Matrix(2, 3, lambda i, j: i*3+j)
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5]])
>>> m.print_nonzero()
[ XX]
[XXX]
>>> m = eye(4)
>>> m.print_nonzero("x")
[x ]
[ x ]
[ x ]
[ x]
"""
s = []
for i in range(self.rows):
line = []
for j in range(self.cols):
if self[i, j] == 0:
line.append(" ")
else:
line.append(str(symb))
s.append("[%s]" % ''.join(line))
print('\n'.join(s))
def LUsolve(self, rhs, iszerofunc=_iszero):
"""Solve the linear system Ax = rhs for x where A = self.
This is for symbolic matrices, for real or complex ones use
sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
See Also
========
lower_triangular_solve
upper_triangular_solve
cholesky_solve
diagonal_solve
LDLsolve
QRsolve
pinv_solve
LUdecomposition
"""
if rhs.rows != self.rows:
raise ShapeError("`self` and `rhs` must have the same number of rows.")
A, perm = self.LUdecomposition_Simple(iszerofunc=_iszero)
n = self.rows
b = rhs.permuteFwd(perm).as_mutable()
# forward substitution, all diag entries are scaled to 1
for i in xrange(n):
for j in xrange(i):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
# backward substitution
for i in xrange(n - 1, -1, -1):
for j in xrange(i + 1, n):
scale = A[i, j]
b.zip_row_op(i, j, lambda x, y: x - y*scale)
scale = A[i, i]
b.row_op(i, lambda x, _: x/scale)
return rhs.__class__(b)
def LUdecomposition(self, iszerofunc=_iszero):
"""Returns the decomposition LU and the row swaps p.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[4, 3], [6, 3]])
>>> L, U, _ = a.LUdecomposition()
>>> L
Matrix([
[ 1, 0],
[3/2, 1]])
>>> U
Matrix([
[4, 3],
[0, -3/2]])
See Also
========
cholesky
LDLdecomposition
QRdecomposition
LUdecomposition_Simple
LUdecompositionFF
LUsolve
"""
combined, p = self.LUdecomposition_Simple(iszerofunc=_iszero)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i, j] = combined[i, j]
else:
if i == j:
L[i, i] = 1
U[i, j] = combined[i, j]
return L, U, p
    def LUdecomposition_Simple(self, iszerofunc=_iszero):
        """Returns A comprised of L, U (L's diag entries are 1) and
        p which is the list of the row swaps (in order).

        The returned matrix stores both factors compactly: the strictly
        lower part holds L's multipliers and the rest holds U.

        See Also
        ========
        LUdecomposition
        LUdecompositionFF
        LUsolve
        """
        if not self.is_square:
            raise NonSquareMatrixError("A Matrix must be square to apply LUdecomposition_Simple().")
        n = self.rows
        A = self.as_mutable()
        p = []
        # factorization
        for j in range(n):
            # update rows above the diagonal (entries of U in column j)
            for i in range(j):
                for k in range(i):
                    A[i, j] = A[i, j] - A[i, k]*A[k, j]
            pivot = -1
            # update rows at/below the diagonal while scanning for a pivot
            for i in range(j, n):
                for k in range(j):
                    A[i, j] = A[i, j] - A[i, k]*A[k, j]
                # find the first non-zero pivot, includes any expression
                if pivot == -1 and not iszerofunc(A[i, j]):
                    pivot = i
            if pivot < 0:
                # this result is based on iszerofunc's analysis of the possible pivots, so even though
                # the element may not be strictly zero, the supplied iszerofunc's evaluation gave True
                raise ValueError("No nonzero pivot found; inversion failed.")
            if pivot != j:  # row must be swapped
                A.row_swap(pivot, j)
                p.append([pivot, j])
            # store the L multipliers below the pivot, scaled to unit diag
            scale = 1 / A[j, j]
            for i in range(j + 1, n):
                A[i, j] = A[i, j]*scale
        return A, p
    def LUdecompositionFF(self):
        """Compute a fraction-free LU decomposition.

        Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
        If the elements of the matrix belong to some integral domain I, then all
        elements of L, D and U are guaranteed to belong to I.

        **Reference**
            - W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
              for LU and QR factors". Frontiers in Computer Science in China,
              Vol 2, no. 1, pp. 67-80, 2008.

        See Also
        ========
        LUdecomposition
        LUdecomposition_Simple
        LUsolve
        """
        from sympy.matrices import SparseMatrix
        zeros = SparseMatrix.zeros
        eye = SparseMatrix.eye

        n, m = self.rows, self.cols
        U, L, P = self.as_mutable(), eye(n), eye(n)
        DD = zeros(n, n)
        oldpivot = 1

        for k in range(n - 1):
            if U[k, k] == 0:
                # zero pivot: search below for a row to swap in
                for kpivot in range(k + 1, n):
                    if U[kpivot, k]:
                        break
                else:
                    # no non-zero candidate in this column
                    raise ValueError("Matrix is not full rank")
                # swap rows in U, the already-built part of L, and P
                U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
                L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
                P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
            L[k, k] = Ukk = U[k, k]
            DD[k, k] = oldpivot*Ukk
            for i in range(k + 1, n):
                L[i, k] = Uik = U[i, k]
                for j in range(k + 1, m):
                    # fraction-free update: division by the previous pivot
                    # is exact within an integral domain
                    U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
                U[i, k] = 0
            oldpivot = Ukk
        DD[n - 1, n - 1] = oldpivot
        return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
"""Return a matrix containing the cofactor of each element.
See Also
========
cofactor
minorEntry
minorMatrix
adjugate
"""
out = self._new(self.rows, self.cols, lambda i, j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
"""Calculate the minor of an element.
See Also
========
minorMatrix
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
return self.minorMatrix(i, j).det(method)
def minorMatrix(self, i, j):
"""Creates the minor matrix of a given element.
See Also
========
minorEntry
cofactor
cofactorMatrix
"""
if not 0 <= i < self.rows or not 0 <= j < self.cols:
raise ValueError("`i` and `j` must satisfy 0 <= i < `self.rows` " +
"(%d)" % self.rows + "and 0 <= j < `self.cols` (%d)." % self.cols)
M = self.as_mutable()
M.row_del(i)
M.col_del(j)
return self._new(M)
def cofactor(self, i, j, method="berkowitz"):
"""Calculate the cofactor of an element.
See Also
========
cofactorMatrix
minorEntry
minorMatrix
"""
if (i + j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1*self.minorEntry(i, j, method)
def jacobian(self, X):
"""Calculates the Jacobian matrix (derivative of a vectorial function).
Parameters
==========
self : vector of expressions representing functions f_i(x_1, ..., x_n).
X : set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(i.e., jacobian() should always work).
Examples
========
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0]])
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)]])
See Also
========
hessian
wronskian
"""
if not isinstance(X, MatrixBase):
X = self._new(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return self._new(m, n, lambda j, i: self[j].diff(X[i]))
    def QRdecomposition(self):
        """Return Q, R where A = Q*R, Q is orthogonal and R is upper triangular.

        Uses the Gram-Schmidt process on the columns of self; requires
        full column rank.

        Examples
        ========

        This is the example from wikipedia:

        >>> from sympy import Matrix
        >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
        >>> Q, R = A.QRdecomposition()
        >>> Q
        Matrix([
        [ 6/7, -69/175, -58/175],
        [ 3/7, 158/175,   6/175],
        [-2/7,    6/35,  -33/35]])
        >>> R
        Matrix([
        [14,  21, -14],
        [ 0, 175, -70],
        [ 0,   0,  35]])
        >>> A == Q*R
        True

        QR factorization of an identity matrix:

        >>> A = Matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        >>> Q, R = A.QRdecomposition()
        >>> Q
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])
        >>> R
        Matrix([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========
        cholesky
        LDLdecomposition
        LUdecomposition
        QRsolve
        """
        cls = self.__class__
        self = self.as_mutable()

        if not self.rows >= self.cols:
            raise MatrixError(
                "The number of rows must be greater than columns")
        n = self.rows
        m = self.cols
        rank = n
        # count zero rows of the row-reduced form to determine the rank
        row_reduced = self.rref()[0]
        for i in range(row_reduced.rows):
            if row_reduced.row(i).norm() == 0:
                rank -= 1
        if not rank == self.cols:
            raise MatrixError("The rank of the matrix must match the columns")
        Q, R = self.zeros(n, m), self.zeros(m)
        for j in range(m):      # for each column vector
            tmp = self[:, j]     # take original v
            for i in range(j):
                # subtract the project of self on new vector
                tmp -= Q[:, i]*self[:, j].dot(Q[:, i])
            tmp.expand()
            # normalize it
            R[j, j] = tmp.norm()
            Q[:, j] = tmp / R[j, j]
            if Q[:, j].norm() != 1:
                # symbolic entries may prevent exact normalization
                raise NotImplementedError(
                    "Could not normalize the vector %d." % j)
            for i in range(j):
                # fill in the upper-triangular projections of R
                R[i, j] = Q[:, i].dot(self[:, j])
        return cls(Q), cls(R)
    def QRsolve(self, b):
        """Solve the linear system 'Ax = b'.

        'self' is the matrix 'A', the method argument is the vector
        'b'.  The method returns the solution vector 'x'.  If 'b' is a
        matrix, the system is solved for each column of 'b' and the
        return value is a matrix of the same shape as 'b'.

        This method is slower (approximately by a factor of 2) but
        more stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually uses an exact arithmetic, so you don't need
        to use QRsolve.

        This is mainly for educational purposes and symbolic matrices, for real
        (or complex) matrices use sympy.mpmath.qr_solve.

        See Also
        ========
        lower_triangular_solve
        upper_triangular_solve
        cholesky_solve
        diagonal_solve
        LDLsolve
        LUsolve
        pinv_solve
        QRdecomposition
        """
        Q, R = self.as_mutable().QRdecomposition()
        # Q is orthogonal, so Q.T*Q = I and R*x = Q.T*b
        y = Q.T*b
        # back substitution to solve R*x = y:
        # We build up the result "backwards" in the vector 'x' and reverse it
        # only in the end.
        x = []
        n = R.rows
        for j in range(n - 1, -1, -1):
            tmp = y[j, :]
            for k in range(j + 1, n):
                # x was appended in reverse order, so solution for row k
                # lives at index n - 1 - k
                tmp -= R[j, k]*x[n - 1 - k]
            x.append(tmp / R[j, j])
        return self._new([row._mat for row in reversed(x)])
def cross(self, b):
"""Return the cross product of `self` and `b` relaxing the condition
of compatible dimensions: if each has 3 elements, a matrix of the
same type and shape as `self` will be returned. If `b` has the same
shape as `self` then common identities for the cross product (like
`a x b = - b x a`) will hold.
See Also
========
dot
multiply
multiply_elementwise
"""
if not is_sequence(b):
raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
type(b))
if not (self.rows * self.cols == b.rows * b.cols == 3):
raise ShapeError("Dimensions incorrect for cross product.")
else:
return self._new(self.rows, self.cols, (
(self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
    def dot(self, b):
        """Return the dot product of Matrix self and b relaxing the condition
        of compatible dimensions: if either the number of rows or columns are
        the same as the length of b then the dot product is returned. If self
        is a row or column vector, a scalar is returned. Otherwise, a list
        of results is returned (and in that case the number of columns in self
        must match the length of b).

        Examples
        ========

        >>> from sympy import Matrix
        >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> v = [1, 1, 1]
        >>> M.row(0).dot(v)
        6
        >>> M.col(0).dot(v)
        12
        >>> M.dot(v)
        [6, 15, 24]

        See Also
        ========
        cross
        multiply
        multiply_elementwise
        """
        from .dense import Matrix

        if not isinstance(b, MatrixBase):
            if is_sequence(b):
                # wrap plain sequences in a Matrix and retry
                if len(b) != self.cols and len(b) != self.rows:
                    raise ShapeError("Dimensions incorrect for dot product.")
                return self.dot(Matrix(b))
            else:
                raise TypeError("`b` must be an ordered iterable or Matrix, not %s." %
                type(b))

        if self.cols == b.rows:
            if b.cols != 1:
                # normalize to the column-vector case by transposing both
                self = self.T
                b = b.T
            prod = flatten((self*b).tolist())
            if len(prod) == 1:
                # row/column vector against vector: return a scalar
                return prod[0]
            return prod
        if self.cols == b.cols:
            return self.dot(b.T)
        elif self.rows == b.rows:
            return self.T.dot(b)
        else:
            raise ShapeError("Dimensions incorrect for dot product.")
def multiply_elementwise(self, b):
"""Return the Hadamard product (elementwise product) of A and B
Examples
========
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> A.multiply_elementwise(B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
cross
dot
multiply
"""
from sympy.matrices import matrix_multiply_elementwise
return matrix_multiply_elementwise(self, b)
def values(self):
"""Return non-zero values of self."""
return [i for i in flatten(self.tolist()) if not i.is_zero]
    def norm(self, ord=None):
        """Return the Norm of a Matrix or Vector.

        In the simplest case this is the geometric size of the vector
        Other norms can be specified by the ord parameter

        =====  ============================  ==========================
        ord    norm for matrices             norm for vectors
        =====  ============================  ==========================
        None   Frobenius norm                2-norm
        'fro'  Frobenius norm                - does not exist
        inf    --                            max(abs(x))
        -inf   --                            min(abs(x))
        1      --                            as below
        -1     --                            as below
        2      2-norm (largest sing. value)  as below
        -2     smallest singular value       as below
        other  - does not exist              sum(abs(x)**ord)**(1./ord)
        =====  ============================  ==========================

        Examples
        ========

        >>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo
        >>> x = Symbol('x', real=True)
        >>> v = Matrix([cos(x), sin(x)])
        >>> trigsimp( v.norm() )
        1
        >>> v.norm(10)
        (sin(x)**10 + cos(x)**10)**(1/10)
        >>> A = Matrix([[1, 1], [1, 1]])
        >>> A.norm(2)# Spectral norm (max of |Ax|/|x| under 2-vector-norm)
        2
        >>> A.norm(-2) # Inverse spectral norm (smallest singular value)
        0
        >>> A.norm() # Frobenius Norm
        2
        >>> Matrix([1, -2]).norm(oo)
        2
        >>> Matrix([-1, 2]).norm(-oo)
        1

        See Also
        ========
        normalized
        """
        # Row or Column Vector Norms
        # values() drops zero entries; [0] keeps the reductions below
        # well-defined for a zero/empty vector
        vals = list(self.values()) or [0]
        if self.rows == 1 or self.cols == 1:
            if ord == 2 or ord is None:  # Common case sqrt(<x, x>)
                return sqrt(Add(*(abs(i)**2 for i in vals)))

            elif ord == 1:  # sum(abs(x))
                return Add(*(abs(i) for i in vals))

            elif ord == S.Infinity:  # max(abs(x))
                return Max(*[abs(i) for i in vals])

            elif ord == S.NegativeInfinity:  # min(abs(x))
                return Min(*[abs(i) for i in vals])

            # Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord)
            # Note that while useful this is not mathematically a norm
            # (reached only when none of the elif branches above returned)
            try:
                return Pow(Add(*(abs(i)**ord for i in vals)), S(1) / ord)
            except (NotImplementedError, TypeError):
                raise ValueError("Expected order to be Number, Symbol, oo")

        # Matrix Norms
        else:
            if ord == 2:  # Spectral Norm
                # Maximum singular value
                return Max(*self.singular_values())

            elif ord == -2:
                # Minimum singular value
                return Min(*self.singular_values())

            elif (ord is None or isinstance(ord, string_types) and ord.lower() in
                    ['f', 'fro', 'frobenius', 'vector']):
                # Reshape as vector and send back to norm function
                return self.vec().norm(ord=2)

            else:
                raise NotImplementedError("Matrix Norms under development")
def normalized(self):
"""Return the normalized version of ``self``.
See Also
========
norm
"""
if self.rows != 1 and self.cols != 1:
raise ShapeError("A Matrix must be a vector to normalize.")
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Return the projection of ``self`` onto the line containing ``v``.
Examples
========
>>> from sympy import Matrix, S, sqrt
>>> V = Matrix([sqrt(3)/2, S.Half])
>>> x = Matrix([[1, 0]])
>>> V.project(x)
Matrix([[sqrt(3)/2, 0]])
>>> V.project(-x)
Matrix([[sqrt(3)/2, 0]])
"""
return v*(self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
"""Permute the rows of the matrix with the given permutation in reverse.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteBkwd([[0, 1], [0, 2]])
Matrix([
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]])
See Also
========
permuteFwd
"""
copy = self.copy()
for i in range(len(perm) - 1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
"""Permute the rows of the matrix with the given permutation.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.permuteFwd([[0, 1], [0, 2]])
Matrix([
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
See Also
========
permuteBkwd
"""
copy = self.copy()
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
    def exp(self):
        """Return the exponentiation of a square matrix.

        Computed via the Jordan normal form: exp(A) = P * exp(J) * P**-1,
        where exp(J) is assembled block-by-block.

        Raises
        ======
        NonSquareMatrixError
            If self is not square.
        NotImplementedError
            If the Jordan normal form cannot be computed.
        """
        if not self.is_square:
            raise NonSquareMatrixError(
                "Exponentiation is valid only for square matrices")
        try:
            P, cells = self.jordan_cells()
        except MatrixError:
            raise NotImplementedError("Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")

        def _jblock_exponential(b):
            # This function computes the matrix exponential for one single Jordan block
            nr = b.rows
            l = b[0, 0]
            if nr == 1:
                # 1x1 block: scalar exponential of the eigenvalue
                res = C.exp(l)
            else:
                from sympy import eye
                # extract the diagonal part
                d = b[0, 0]*eye(nr)
                # and the nilpotent part
                n = b-d
                # compute its exponential: the series terminates because
                # n is nilpotent (n**nr == 0)
                nex = eye(nr)
                for i in range(1, nr):
                    nex = nex+n**i/factorial(i)
                # combine the two parts
                res = exp(b[0, 0])*nex
            return(res)

        blocks = list(map(_jblock_exponential, cells))
        from sympy.matrices import diag
        eJ = diag(* blocks)
        # n = self.rows
        ret = P*eJ*P.inv()
        return type(self)(ret)
@property
def is_square(self):
"""Checks if a matrix is square.
A matrix is square if the number of rows equals the number of columns.
The empty matrix is square by definition, since the number of rows and
the number of columns are both zero.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[1, 2, 3], [4, 5, 6]])
>>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> c = Matrix([])
>>> a.is_square
False
>>> b.is_square
True
>>> c.is_square
True
"""
return self.rows == self.cols
@property
def is_zero(self):
"""Checks if a matrix is a zero matrix.
A matrix is zero if every element is zero. A matrix need not be square
to be considered zero. The empty matrix is zero by the principle of
vacuous truth. For a matrix that may or may not be zero (e.g.
contains a symbol), this will be None
Examples
========
>>> from sympy import Matrix, zeros
>>> from sympy.abc import x
>>> a = Matrix([[0, 0], [0, 0]])
>>> b = zeros(3, 4)
>>> c = Matrix([[0, 1], [0, 0]])
>>> d = Matrix([])
>>> e = Matrix([[x, 0], [0, 0]])
>>> a.is_zero
True
>>> b.is_zero
True
>>> c.is_zero
False
>>> d.is_zero
True
>>> e.is_zero
"""
if any(i.is_zero == False for i in self):
return False
if any(i.is_zero == None for i in self):
return None
return True
def is_nilpotent(self):
"""Checks if a matrix is nilpotent.
A matrix B is nilpotent if for some integer k, B**k is
a zero matrix.
Examples
========
>>> from sympy import Matrix
>>> a = Matrix([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
True
>>> a = Matrix([[1, 0, 1], [1, 0, 0], [1, 1, 0]])
>>> a.is_nilpotent()
False
"""
if not self.is_square:
raise NonSquareMatrixError(
"Nilpotency is valid only for square matrices")
x = Dummy('x')
if self.charpoly(x).args[0] == x**self.rows:
return True
return False
@property
def is_upper(self):
"""Check if matrix is an upper triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_upper
True
>>> m = Matrix(4, 3, [5, 1, 9, 0, 4 , 6, 0, 0, 5, 0, 0, 0])
>>> m
Matrix([
[5, 1, 9],
[0, 4, 6],
[0, 0, 5],
[0, 0, 0]])
>>> m.is_upper
True
>>> m = Matrix(2, 3, [4, 2, 5, 6, 1, 1])
>>> m
Matrix([
[4, 2, 5],
[6, 1, 1]])
>>> m.is_upper
False
See Also
========
is_lower
is_diagonal
is_upper_hessenberg
"""
return all(self[i, j].is_zero
for i in range(1, self.rows)
for j in range(i))
@property
def is_lower(self):
"""Check if matrix is a lower triangular matrix. True can be returned
even if the matrix is not square.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [1, 0, 0, 1])
>>> m
Matrix([
[1, 0],
[0, 1]])
>>> m.is_lower
True
>>> m = Matrix(4, 3, [0, 0, 0, 2, 0, 0, 1, 4 , 0, 6, 6, 5])
>>> m
Matrix([
[0, 0, 0],
[2, 0, 0],
[1, 4, 0],
[6, 6, 5]])
>>> m.is_lower
True
>>> from sympy.abc import x, y
>>> m = Matrix(2, 2, [x**2 + y, y**2 + x, 0, x + y])
>>> m
Matrix([
[x**2 + y, x + y**2],
[ 0, x + y]])
>>> m.is_lower
False
See Also
========
is_upper
is_diagonal
is_lower_hessenberg
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 1, self.cols))
    @property
    def is_hermitian(self):
        """Checks if the matrix is Hermitian.

        In a Hermitian matrix element i,j is the complex conjugate of
        element j,i.

        Examples
        ========

        >>> from sympy.matrices import Matrix
        >>> from sympy import I
        >>> from sympy.abc import x
        >>> a = Matrix([[1, I], [-I, 1]])
        >>> a
        Matrix([
        [ 1, I],
        [-I, 1]])
        >>> a.is_hermitian
        True
        >>> a[0, 0] = 2*I
        >>> a.is_hermitian
        False
        >>> a[0, 0] = x
        >>> a.is_hermitian
        >>> a[0, 1] = a[1, 0]*I
        >>> a.is_hermitian
        False
        """
        def cond():
            # conditions are yielded lazily so fuzzy_and can short-circuit
            yield self.is_square
            # diagonal entries must be real
            # NOTE(review): semantics of _fuzzy_group_inverse are not
            # visible here; presumably a fuzzy (three-valued) conjunction
            yield _fuzzy_group_inverse(
                self[i, i].is_real for i in range(self.rows))
            # off-diagonal pairs must be complex conjugates of each other
            yield _fuzzy_group_inverse(
                (self[i, j] - self[j, i].conjugate()).is_zero
                for i in range(self.rows)
                for j in range(i + 1, self.cols))
        return fuzzy_and(i for i in cond())
@property
def is_upper_hessenberg(self):
"""Checks if the matrix is the upper-Hessenberg form.
The upper hessenberg matrix has zero entries
below the first subdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 4, 2, 3], [3, 4, 1, 7], [0, 2, 3, 4], [0, 0, 1, 3]])
>>> a
Matrix([
[1, 4, 2, 3],
[3, 4, 1, 7],
[0, 2, 3, 4],
[0, 0, 1, 3]])
>>> a.is_upper_hessenberg
True
See Also
========
is_lower_hessenberg
is_upper
"""
return all(self[i, j].is_zero
for i in range(2, self.rows)
for j in range(i - 1))
@property
def is_lower_hessenberg(self):
r"""Checks if the matrix is in the lower-Hessenberg form.
The lower hessenberg matrix has zero entries
above the first superdiagonal.
Examples
========
>>> from sympy.matrices import Matrix
>>> a = Matrix([[1, 2, 0, 0], [5, 2, 3, 0], [3, 4, 3, 7], [5, 6, 1, 1]])
>>> a
Matrix([
[1, 2, 0, 0],
[5, 2, 3, 0],
[3, 4, 3, 7],
[5, 6, 1, 1]])
>>> a.is_lower_hessenberg
True
See Also
========
is_upper_hessenberg
is_lower
"""
return all(self[i, j].is_zero
for i in range(self.rows)
for j in range(i + 2, self.cols))
def is_symbolic(self):
"""Checks if any elements contain Symbols.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x, y
>>> M = Matrix([[x, y], [1, 0]])
>>> M.is_symbolic()
True
"""
return any(element.has(Symbol) for element in self.values())
def is_symmetric(self, simplify=True):
"""Check if matrix is symmetric matrix,
that is square matrix and is equal to its transpose.
By default, simplifications occur before testing symmetry.
They can be skipped using 'simplify=False'; while speeding things a bit,
this may however induce false negatives.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, [0, 1, 1, 2])
>>> m
Matrix([
[0, 1],
[1, 2]])
>>> m.is_symmetric()
True
>>> m = Matrix(2, 2, [0, 1, 2, 0])
>>> m
Matrix([
[0, 1],
[2, 0]])
>>> m.is_symmetric()
False
>>> m = Matrix(2, 3, [0, 0, 0, 0, 0, 0])
>>> m
Matrix([
[0, 0, 0],
[0, 0, 0]])
>>> m.is_symmetric()
False
>>> from sympy.abc import x, y
>>> m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2 , 2, 0, y, 0, 3])
>>> m
Matrix([
[ 1, x**2 + 2*x + 1, y],
[(x + 1)**2, 2, 0],
[ y, 0, 3]])
>>> m.is_symmetric()
True
If the matrix is already simplified, you may speed-up is_symmetric()
test by using 'simplify=False'.
>>> m.is_symmetric(simplify=False)
False
>>> m1 = m.expand()
>>> m1.is_symmetric(simplify=False)
True
"""
if not self.is_square:
return False
if simplify:
delta = self - self.transpose()
delta.simplify()
return delta.equals(self.zeros(self.rows, self.cols))
else:
return self == self.transpose()
    def is_anti_symmetric(self, simplify=True):
        """Check if matrix M is an antisymmetric matrix,
        that is, M is a square matrix with all M[i, j] == -M[j, i].

        When ``simplify=True`` (default), the sum M[i, j] + M[j, i] is
        simplified before testing to see if it is zero. By default,
        the SymPy simplify function is used. To use a custom function
        set simplify to a function that accepts a single argument which
        returns a simplified expression. To skip simplification, set
        simplify to False but note that although this will be faster,
        it may induce false negatives.

        Examples
        ========

        >>> from sympy import Matrix, symbols
        >>> m = Matrix(2, 2, [0, 1, -1, 0])
        >>> m
        Matrix([
        [ 0, 1],
        [-1, 0]])
        >>> m.is_anti_symmetric()
        True
        >>> x, y = symbols('x y')
        >>> m = Matrix(2, 3, [0, 0, x, -y, 0, 0])
        >>> m
        Matrix([
        [ 0, 0, x],
        [-y, 0, 0]])
        >>> m.is_anti_symmetric()
        False

        >>> from sympy.abc import x, y
        >>> m = Matrix(3, 3, [0, x**2 + 2*x + 1, y,
        ...                   -(x + 1)**2 , 0, x*y,
        ...                   -y, -x*y, 0])

        Simplification of matrix elements is done by default so even
        though two elements which should be equal and opposite wouldn't
        pass an equality test, the matrix is still reported as
        anti-symmetric:

        >>> m[0, 1] == -m[1, 0]
        False
        >>> m.is_anti_symmetric()
        True

        If 'simplify=False' is used for the case when a Matrix is already
        simplified, this will speed things up. Here, we see that without
        simplification the matrix does not appear anti-symmetric:

        >>> m.is_anti_symmetric(simplify=False)
        False

        But if the matrix were already expanded, then it would appear
        anti-symmetric and simplification in the is_anti_symmetric routine
        is not needed:

        >>> m = m.expand()
        >>> m.is_anti_symmetric(simplify=False)
        True
        """
        # accept custom simplification: a callable is used directly; True
        # selects the default _simplify; False disables simplification
        simpfunc = simplify if isinstance(simplify, FunctionType) else \
            _simplify if simplify else False
        if not self.is_square:
            return False
        n = self.rows
        if simplify:
            for i in range(n):
                # diagonal
                # M[i, i] == -M[i, i] forces a zero diagonal
                if not simpfunc(self[i, i]).is_zero:
                    return False
                # others
                for j in range(i + 1, n):
                    diff = self[i, j] + self[j, i]
                    if not simpfunc(diff).is_zero:
                        return False
            return True
        else:
            # structural test only; may give false negatives on
            # unsimplified but equivalent expressions
            for i in range(n):
                for j in range(i, n):
                    if self[i, j] != -self[j, i]:
                        return False
            return True
def is_diagonal(self):
"""Check if matrix is diagonal,
that is matrix in which the entries outside the main diagonal are all zero.
Examples
========
>>> from sympy import Matrix, diag
>>> m = Matrix(2, 2, [1, 0, 0, 2])
>>> m
Matrix([
[1, 0],
[0, 2]])
>>> m.is_diagonal()
True
>>> m = Matrix(2, 2, [1, 1, 0, 2])
>>> m
Matrix([
[1, 1],
[0, 2]])
>>> m.is_diagonal()
False
>>> m = diag(1, 2, 3)
>>> m
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> m.is_diagonal()
True
See Also
========
is_lower
is_upper
is_diagonalizable
diagonalize
"""
for i in range(self.rows):
for j in range(self.cols):
if i != j and self[i, j]:
return False
return True
def det(self, method="bareis"):
"""Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
det_LU ... det_LU_decomposition
See Also
========
det_bareis
berkowitz_det
det_LU
"""
# if methods were made internal and all determinant calculations
# passed through here, then these lines could be factored out of
# the method routines
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
elif method == "det_LU":
return self.det_LU_decomposition()
else:
raise ValueError("Determinant method '%s' unrecognized" % method)
    def det_bareis(self):
        """Compute matrix determinant using Bareis' fraction-free
        algorithm which is an extension of the well known Gaussian
        elimination method. This approach is best suited for dense
        symbolic matrices and will result in a determinant with
        minimal number of fractions. It means that less term
        rewriting is needed on resulting formulae.

        TODO: Implement algorithm for sparse matrices (SFF),
        http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.

        See Also
        ========
        det
        berkowitz_det
        """
        if not self.is_square:
            raise NonSquareMatrixError()
        if not self:
            return S.One

        M, n = self.copy().as_mutable(), self.rows

        # closed-form expansions for the small cases
        if n == 1:
            det = M[0, 0]
        elif n == 2:
            det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
        elif n == 3:
            det = (M[0, 0]*M[1, 1]*M[2, 2] + M[0, 1]*M[1, 2]*M[2, 0] + M[0, 2]*M[1, 0]*M[2, 1]) - \
                (M[0, 2]*M[1, 1]*M[2, 0] + M[0, 0]*M[1, 2]*M[2, 1] + M[0, 1]*M[1, 0]*M[2, 2])
        else:
            sign = 1  # track current sign in case of column swap

            for k in range(n - 1):
                # look for a pivot in the current column
                # and assume det == 0 if none is found
                if M[k, k] == 0:
                    for i in range(k + 1, n):
                        if M[i, k]:
                            M.row_swap(i, k)
                            sign *= -1
                            break
                    else:
                        return S.Zero

                # proceed with Bareis' fraction-free (FF)
                # form of Gaussian elimination algorithm
                for i in range(k + 1, n):
                    for j in range(k + 1, n):
                        D = M[k, k]*M[i, j] - M[i, k]*M[k, j]

                        if k > 0:
                            # division by the previous pivot is exact in
                            # the fraction-free scheme
                            D /= M[k - 1, k - 1]

                        if D.is_Atom:
                            M[i, j] = D
                        else:
                            M[i, j] = cancel(D)

            det = sign*M[n - 1, n - 1]

        return det.expand()
def det_LU_decomposition(self):
"""Compute matrix determinant using LU decomposition
Note that this method fails if the LU decomposition itself
fails. In particular, if the matrix has no inverse this method
will fail.
TODO: Implement algorithm for sparse matrices (SFF),
http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.
See Also
========
det
det_bareis
berkowitz_det
"""
if not self.is_square:
raise NonSquareMatrixError()
if not self:
return S.One
M, n = self.copy(), self.rows
p, prod = [], 1
l, u, p = M.LUdecomposition()
if len(p) % 2:
prod = -1
for k in range(n):
prod = prod*u[k, k]*l[k, k]
return prod.expand()
def adjugate(self, method="berkowitz"):
"""Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See Also
========
cofactorMatrix
transpose
berkowitz
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""Calculates the inverse using LU decomposition.
See Also
========
inv
inverse_GE
inverse_ADJ
"""
if not self.is_square:
raise NonSquareMatrixError()
ok = self.rref(simplify=True)[0]
if any(iszerofunc(ok[j, j]) for j in range(ok.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self.LUsolve(self.eye(self.rows), iszerofunc=_iszero)
def inverse_GE(self, iszerofunc=_iszero):
"""Calculates the inverse using Gaussian elimination.
See Also
========
inv
inverse_LU
inverse_ADJ
"""
from .dense import Matrix
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
if any(iszerofunc(red[j, j]) for j in range(red.rows)):
raise ValueError("Matrix det == 0; not invertible.")
return self._new(red[:, big.rows:])
def inverse_ADJ(self, iszerofunc=_iszero):
"""Calculates the inverse using the adjugate matrix and a determinant.
See Also
========
inv
inverse_LU
inverse_GE
"""
if not self.is_square:
raise NonSquareMatrixError("A Matrix must be square to invert.")
d = self.berkowitz_det()
zero = d.equals(0)
if zero is None:
# if equals() can't decide, will rref be able to?
ok = self.rref(simplify=True)[0]
zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
if zero:
raise ValueError("Matrix det == 0; not invertible.")
return self.adjugate() / d
def rref(self, iszerofunc=_iszero, simplify=False):
    """Return reduced row-echelon form of matrix and indices of pivot vars.

    To simplify elements before finding nonzero pivots set simplify=True
    (to use the default SymPy simplify function) or pass a custom
    simplify function.

    Examples
    ========

    >>> from sympy import Matrix
    >>> from sympy.abc import x
    >>> m = Matrix([[1, 2], [x, 1 - 1/x]])
    >>> m.rref()
    (Matrix([
    [1, 0],
    [0, 1]]), [0, 1])
    """
    # ``simplify`` may be a bool (use the default simplifier) or a callable
    simpfunc = simplify if isinstance(
        simplify, FunctionType) else _simplify
    # pivot: index of next row to contain a pivot
    pivot, r = 0, self.as_mutable()
    # pivotlist: indices of pivot variables (non-free)
    pivotlist = []
    for i in xrange(r.cols):
        if pivot == r.rows:
            break
        if simplify:
            r[pivot, i] = simpfunc(r[pivot, i])
        if iszerofunc(r[pivot, i]):
            # candidate pivot is zero: look below for a nonzero entry in
            # this column and swap that row up
            for k in xrange(pivot, r.rows):
                if simplify and k > pivot:
                    r[k, i] = simpfunc(r[k, i])
                if not iszerofunc(r[k, i]):
                    r.row_swap(pivot, k)
                    break
            else:
                # entire column (from ``pivot`` down) is zero: free variable
                continue
        # normalize the pivot row, then eliminate this column elsewhere
        scale = r[pivot, i]
        r.row_op(pivot, lambda x, _: x / scale)
        for j in xrange(r.rows):
            if j == pivot:
                continue
            scale = r[j, i]
            r.zip_row_op(j, pivot, lambda x, y: x - scale*y)
        pivotlist.append(i)
        pivot += 1
    return self._new(r), pivotlist
def rank(self, iszerofunc=_iszero, simplify=False):
    """
    Returns the rank of a matrix

    >>> from sympy import Matrix
    >>> from sympy.abc import x
    >>> m = Matrix([[1, 2], [x, 1 - 1/x]])
    >>> m.rank()
    2
    >>> n = Matrix(3, 3, range(1, 10))
    >>> n.rank()
    2
    """
    # the rank equals the number of pivot columns in the rref
    pivots = self.rref(iszerofunc=iszerofunc, simplify=simplify)[1]
    return len(pivots)
def nullspace(self, simplify=False):
    """Returns list of vectors (Matrix objects) that span nullspace of self

    ``simplify`` may be a bool (use the default simplifier) or a callable;
    it is forwarded to :meth:`rref` and applied to candidate entries.
    """
    from sympy.matrices import zeros
    simpfunc = simplify if isinstance(
        simplify, FunctionType) else _simplify
    reduced, pivots = self.rref(simplify=simpfunc)
    basis = []
    # one basis vector per free (non-pivot) column
    # create a set of vectors for the basis
    for i in range(self.cols - len(pivots)):
        basis.append(zeros(self.cols, 1))
    # basiskey[k] is the free-column index that basis[k] corresponds to
    basiskey, cur = [-1]*len(basis), 0
    for i in range(self.cols):
        if i not in pivots:
            basiskey[cur] = i
            cur += 1
    for i in range(self.cols):
        if i not in pivots:  # free var, just set vector's ith place to 1
            basis[basiskey.index(i)][i, 0] = 1
        else:  # add negative of nonpivot entry to corr vector
            for j in range(i + 1, self.cols):
                line = pivots.index(i)
                v = reduced[line, j]
                if simplify:
                    v = simpfunc(v)
                if v:
                    if j in pivots:
                        # XXX: Is this the correct error?
                        raise NotImplementedError(
                            "Could not compute the nullspace of `self`.")
                    basis[basiskey.index(j)][i, 0] = -v
    return [self._new(b) for b in basis]
def berkowitz(self):
    """The Berkowitz algorithm.

    Given N x N matrix with symbolic content, compute efficiently
    coefficients of characteristic polynomials of 'self' and all
    its square sub-matrices composed by removing both i-th row
    and column, without division in the ground domain.

    This method is particularly useful for computing determinant,
    principal minors and characteristic polynomial, when 'self'
    has complicated coefficients e.g. polynomials. Semi-direct
    usage of this algorithm is also important in computing
    efficiently sub-resultant PRS.

    Assuming that M is a square matrix of dimension N x N and
    I is N x N identity matrix, then the following definition
    of characteristic polynomial is being used:

    charpoly(M) = det(t*I - M)

    As a consequence, all polynomials generated by Berkowitz
    algorithm are monic.

    >>> from sympy import Matrix
    >>> from sympy.abc import x, y, z
    >>> M = Matrix([[x, y, z], [1, 0, 0], [y, z, x]])
    >>> p, q, r = M.berkowitz()
    >>> p # 1 x 1 M's sub-matrix
    (1, -x)
    >>> q # 2 x 2 M's sub-matrix
    (1, -x, -y)
    >>> r # 3 x 3 M's sub-matrix
    (1, -2*x, x**2 - y*z - y, x*y - z**2)

    For more information on the implemented algorithm refer to:

    [1] S.J. Berkowitz, On computing the determinant in small
        parallel time using a small number of processors, ACM,
        Information Processing Letters 18, 1984, pp. 147-150

    [2] M. Keber, Division-Free computation of sub-resultants
        using Bezout matrices, Tech. Report MPI-I-2006-1-006,
        Saarbrucken, 2006

    See Also
    ========
    berkowitz_det
    berkowitz_minors
    berkowitz_charpoly
    berkowitz_eigenvals
    """
    from sympy.matrices import zeros
    if not self.is_square:
        raise NonSquareMatrixError()
    A, N = self, self.rows
    transforms = [0]*(N - 1)
    # Build, from the largest submatrix down, the transform matrices that
    # extend the charpoly of the k x k leading submatrix to (k+1) x (k+1).
    for n in range(N, 1, -1):
        T, k = zeros(n + 1, n), n - 1
        # R: last row without the corner; C: last column without the corner
        R, C = -A[k, :k], A[:k, k]
        # shrink A to its leading principal submatrix; a is -corner entry
        A, a = A[:k, :k], -A[k, k]
        # items[i] becomes the scalar (R * A**i * C) after the second loop
        items = [C]
        for i in range(0, n - 2):
            items.append(A*items[i])
        for i, B in enumerate(items):
            items[i] = (R*B)[0, 0]
        items = [S.One, a] + items
        # T is lower-triangular with constant diagonals filled from ``items``
        for i in range(n):
            T[i:, i] = items[:n - i + 1]
        transforms[k - 1] = T
    # start with the 1 x 1 charpoly (1, -A[0,0]) and chain the transforms
    polys = [self._new([S.One, -A[0, 0]])]
    for i, T in enumerate(transforms):
        polys.append(T*polys[i])
    return tuple(map(tuple, polys))
def berkowitz_det(self):
    """Computes determinant using Berkowitz method.

    See Also
    ========
    det
    berkowitz
    """
    if not self.is_square:
        raise NonSquareMatrixError()
    if not self:
        return S.One
    # charpoly(t) = det(t*I - M) is monic, so
    # det(M) = (-1)**n times its constant coefficient
    charpoly = self.berkowitz()[-1]
    return (-1)**(len(charpoly) - 1)*charpoly[-1]
def berkowitz_minors(self):
    """Computes principal minors using Berkowitz method.

    See Also
    ========
    berkowitz
    """
    minors = []
    sign = S.NegativeOne
    for poly in self.berkowitz():
        # the k-th leading principal minor is (-1)**k * poly[-1]
        minors.append(sign*poly[-1])
        sign = -sign
    return tuple(minors)
def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
    """Computes characteristic polynomial minors using Berkowitz method.

    A PurePoly is returned so using different variables for ``x`` does
    not affect the comparison or the polynomials:

    Examples
    ========

    >>> from sympy import Matrix
    >>> from sympy.abc import x, y
    >>> A = Matrix([[1, 3], [2, 0]])
    >>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
    True

    Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
    default (which looks good when pretty-printed in unicode):

    >>> A.berkowitz_charpoly().as_expr()
    _lambda**2 - _lambda - 6

    No test is done to see that ``x`` doesn't clash with an existing
    symbol, so using the default (``lambda``) or your own Dummy symbol is
    the safest option:

    >>> A = Matrix([[1, 2], [x, 0]])
    >>> A.charpoly().as_expr()
    _lambda**2 - _lambda - 2*x
    >>> A.charpoly(x).as_expr()
    x**2 - 3*x

    See Also
    ========
    berkowitz
    """
    # simplify each coefficient of the full-size charpoly before wrapping
    coeffs = [simplify(c) for c in self.berkowitz()[-1]]
    return PurePoly(coeffs, x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
    """Computes eigenvalues of a Matrix using Berkowitz method.

    See Also
    ========
    berkowitz
    """
    # eigenvalues are the roots of the characteristic polynomial
    charpoly = self.berkowitz_charpoly(Dummy('x'))
    return roots(charpoly, **flags)
def eigenvals(self, **flags):
    """Return eigen values using the berkowitz_eigenvals routine.

    Since the roots routine doesn't always work well with Floats,
    they will be replaced with Rationals before calling that
    routine. If this is not desired, set flag ``rational`` to False.
    """
    # rationalize Floats unless the caller (e.g. eigenvects) indicates
    # this has already been done
    do_rational = flags.pop('rational', True)
    if do_rational and any(v.has(Float) for v in self):
        self = self._new(self.rows, self.cols,
                         [nsimplify(v, rational=True) for v in self])
    flags.pop('simplify', None)  # pop unsupported flag
    return self.berkowitz_eigenvals(**flags)
def eigenvects(self, **flags):
    """Return list of triples (eigenval, multiplicity, basis).

    The flag ``simplify`` has two effects:
        1) if bool(simplify) is True, as_content_primitive()
           will be used to tidy up normalization artifacts;
        2) if nullspace needs simplification to compute the
           basis, the simplify flag will be passed on to the
           nullspace routine which will interpret it there.

    If the matrix contains any Floats, they will be changed to Rationals
    for computation purposes, but the answers will be returned after being
    evaluated with evalf. If it is desired to remove small imaginary
    portions during the evalf step, pass a value for the ``chop`` flag.
    """
    from sympy.matrices import eye
    simplify = flags.get('simplify', True)
    primitive = bool(flags.get('simplify', False))
    chop = flags.pop('chop', False)
    flags.pop('multiple', None)  # remove this if it's there
    # roots doesn't like Floats, so replace them with Rationals
    float = False
    if any(v.has(Float) for v in self):
        float = True
        self = self._new(self.rows, self.cols, [nsimplify(
            v, rational=True) for v in self])
        flags['rational'] = False  # to tell eigenvals not to do this
    out, vlist = [], self.eigenvals(**flags)
    vlist = list(vlist.items())
    vlist.sort(key=default_sort_key)
    flags.pop('rational', None)
    for r, k in vlist:
        # eigenvectors of eigenvalue r span the nullspace of (A - r*I)
        tmp = self.as_mutable() - eye(self.rows)*r
        basis = tmp.nullspace()
        # whether tmp.is_symbolic() is True or False, it is possible that
        # the basis will come back as [] in which case simplification is
        # necessary.
        if not basis:
            # The nullspace routine failed, try it again with simplification
            basis = tmp.nullspace(simplify=simplify)
            if not basis:
                raise NotImplementedError(
                    "Can't evaluate eigenvector for eigenvalue %s" % r)
        if primitive:
            # the relationship A*e = lambda*e will still hold if we change the
            # eigenvector; so if simplify is True we tidy up any normalization
            # artifacts with as_content_primitive (default) and remove any pure
            # Integer denominators.
            l = 1
            for i, b in enumerate(basis[0]):
                c, p = signsimp(b).as_content_primitive()
                if c is not S.One:
                    b = c*p
                    l = ilcm(l, c.q)
                basis[0][i] = b
            if l != 1:
                basis[0] *= l
        if float:
            # restore Float output since the input contained Floats
            out.append((r.evalf(chop=chop), k, [
                self._new(b).evalf(chop=chop) for b in basis]))
        else:
            out.append((r, k, [self._new(b) for b in basis]))
    return out
def singular_values(self):
    """Compute the singular values of a Matrix

    Examples
    ========

    >>> from sympy import Matrix, Symbol
    >>> x = Symbol('x', real=True)
    >>> A = Matrix([[0, 1, 0], [0, x, 0], [-1, 0, 0]])
    >>> A.singular_values()
    [sqrt(x**2 + 1), 1, 0]

    See Also
    ========
    condition_number
    """
    mutable = self.as_mutable()
    # the singular values are the square roots of the eigenvalues of A.H*A
    valmultpairs = (mutable.H*mutable).eigenvals()
    vals = []
    for value, multiplicity in valmultpairs.items():
        # repeat each root according to its multiplicity
        vals.extend([sqrt(value)]*multiplicity)
    # sort them in descending order
    vals.sort(reverse=True, key=default_sort_key)
    return vals
def condition_number(self):
    """Returns the condition number of a matrix.

    This is the maximum singular value divided by the minimum singular value

    Examples
    ========

    >>> from sympy import Matrix, S
    >>> A = Matrix([[1, 0, 0], [0, 10, 0], [0, 0, S.One/10]])
    >>> A.condition_number()
    100

    See Also
    ========
    singular_values
    """
    # An empty matrix has no singular values; without this guard
    # Max()/Min() below would be called with no arguments and fail.
    # Return zero by convention (as later SymPy versions do).
    if not self:
        return S.Zero
    singularvalues = self.singular_values()
    return Max(*singularvalues) / Min(*singularvalues)
def __getattr__(self, attr):
    """Provide elementwise 'diff', 'integrate' and 'limit' as a fallback;
    any other missing attribute raises AttributeError."""
    if attr not in ('diff', 'integrate', 'limit'):
        raise AttributeError(
            "%s has no attribute %s." % (self.__class__.__name__, attr))

    def doit(*args):
        # apply the named elementwise operation to every entry
        return self.applyfunc(lambda item: getattr(item, attr)(*args))
    return doit
def integrate(self, *args):
    """Integrate each element of the matrix.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> from sympy.abc import x, y
    >>> M = Matrix([[x, y], [1, 0]])
    >>> M.integrate((x, ))
    Matrix([
    [x**2/2, x*y],
    [     x,   0]])
    >>> M.integrate((x, 0, 2))
    Matrix([
    [2, 2*y],
    [2,   0]])

    See Also
    ========
    limit
    diff
    """
    def entry(i, j):
        # integrate the (i, j) element with the caller's arguments
        return self[i, j].integrate(*args)
    return self._new(self.rows, self.cols, entry)
def limit(self, *args):
    """Calculate the limit of each element in the matrix.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> from sympy.abc import x, y
    >>> M = Matrix([[x, y], [1, 0]])
    >>> M.limit(x, 2)
    Matrix([
    [2, y],
    [1, 0]])

    See Also
    ========
    integrate
    diff
    """
    def entry(i, j):
        # take the limit of the (i, j) element with the caller's arguments
        return self[i, j].limit(*args)
    return self._new(self.rows, self.cols, entry)
def diff(self, *args):
    """Calculate the derivative of each element in the matrix.

    Examples
    ========

    >>> from sympy.matrices import Matrix
    >>> from sympy.abc import x, y
    >>> M = Matrix([[x, y], [1, 0]])
    >>> M.diff(x)
    Matrix([
    [1, 0],
    [0, 0]])

    See Also
    ========
    integrate
    limit
    """
    def entry(i, j):
        # differentiate the (i, j) element with the caller's arguments
        return self[i, j].diff(*args)
    return self._new(self.rows, self.cols, entry)
def vec(self):
    """Return the Matrix converted into a one column matrix by stacking columns

    Examples
    ========

    >>> from sympy import Matrix
    >>> m=Matrix([[1, 3], [2, 4]])
    >>> m
    Matrix([
    [1, 3],
    [2, 4]])
    >>> m.vec()
    Matrix([
    [1],
    [2],
    [3],
    [4]])

    See Also
    ========
    vech
    """
    # stacking columns is the same as reading the transpose row-by-row
    total = len(self)
    return self.T.reshape(total, 1)
def vech(self, diagonal=True, check_symmetry=True):
    """Return the unique elements of a symmetric Matrix as a one column matrix
    by stacking the elements in the lower triangle.

    Arguments:
    diagonal -- include the diagonal cells of self or not
    check_symmetry -- checks symmetry of self but not completely reliably

    Examples
    ========

    >>> from sympy import Matrix
    >>> m=Matrix([[1, 2], [2, 3]])
    >>> m
    Matrix([
    [1, 2],
    [2, 3]])
    >>> m.vech()
    Matrix([
    [1],
    [2],
    [3]])
    >>> m.vech(diagonal=False)
    Matrix([[2]])

    See Also
    ========
    vec
    """
    from sympy.matrices import zeros
    c = self.cols
    if c != self.rows:
        raise ShapeError("Matrix must be square")
    if check_symmetry:
        self.simplify()
        if self != self.transpose():
            raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
    # start at the diagonal (offset 0) or just below it (offset 1)
    offset = 0 if diagonal else 1
    size = c*(c + 1) // 2 if diagonal else c*(c - 1) // 2
    result = zeros(size, 1)
    k = 0
    for col in range(c):
        for row in range(col + offset, c):
            result[k] = self[row, col]
            k += 1
    return result
def get_diag_blocks(self):
    """Obtains the square sub-matrices on the main diagonal of a square matrix.

    Useful for inverting symbolic matrices or solving systems of
    linear equations which may be decoupled by having a block diagonal
    structure.

    Examples
    ========

    >>> from sympy import Matrix
    >>> from sympy.abc import x, y, z
    >>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
    >>> a1, a2, a3 = A.get_diag_blocks()
    >>> a1
    Matrix([
    [1, 3],
    [y, z**2]])
    >>> a2
    Matrix([[x]])
    >>> a3
    Matrix([[0]])
    """
    sub_blocks = []

    def recurse_sub_blocks(M):
        # Grow the candidate top-left block until everything to its right
        # and below it is zero, then split it off and recurse on the
        # remaining lower-right submatrix.
        i = 1
        while i <= M.shape[0]:
            if i == 1:
                to_the_right = M[0, i:]
                to_the_bottom = M[i:, 0]
            else:
                to_the_right = M[:i, i:]
                to_the_bottom = M[i:, :i]
            if any(to_the_right) or any(to_the_bottom):
                # block boundary not reached yet; grow the candidate
                i += 1
                continue
            else:
                sub_blocks.append(M[:i, :i])
                if M.shape == M[:i, :i].shape:
                    # the block is the whole matrix; nothing left to split
                    return
                else:
                    recurse_sub_blocks(M[i:, i:])
                    return
    recurse_sub_blocks(self)
    return sub_blocks
def diagonalize(self, reals_only=False, sort=False, normalize=False):
    """
    Return (P, D), where D is diagonal and

        D = P^-1 * M * P

    where M is current matrix.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    >>> m
    Matrix([
    [1,  2, 0],
    [0,  3, 0],
    [2, -4, 2]])
    >>> (P, D) = m.diagonalize()
    >>> D
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    >>> P
    Matrix([
    [-1, 0, -1],
    [ 0, 0, -1],
    [ 2, 1,  2]])
    >>> P.inv() * m * P
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])

    See Also
    ========
    is_diagonal
    is_diagonalizable
    """
    from sympy.matrices import diag
    if not self.is_square:
        raise NonSquareMatrixError()
    if not self.is_diagonalizable(reals_only, False):
        self._diagonalize_clear_subproducts()
        raise MatrixError("Matrix is not diagonalizable")
    else:
        # is_diagonalizable(clear_subproducts=False) cached self._eigenvects;
        # recompute only if the cache is unexpectedly empty
        if self._eigenvects is None:
            self._eigenvects = self.eigenvects(simplify=True)
        if sort:
            self._eigenvects.sort(key=default_sort_key)
            self._eigenvects.reverse()
        diagvals = []
        # build P one eigenvector column at a time, repeated per multiplicity
        P = self._new(self.rows, 0, [])
        for eigenval, multiplicity, vects in self._eigenvects:
            for k in range(multiplicity):
                diagvals.append(eigenval)
                vec = vects[k]
                if normalize:
                    vec = vec / vec.norm()
                P = P.col_insert(P.cols, vec)
        D = diag(*diagvals)
        self._diagonalize_clear_subproducts()
        return (P, D)
def is_diagonalizable(self, reals_only=False, clear_subproducts=True):
    """Check if matrix is diagonalizable.

    If reals_only==True then check that diagonalized matrix consists of the only not complex values.

    Some subproducts could be used further in other methods to avoid double calculations,
    By default (if clear_subproducts==True) they will be deleted.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    >>> m
    Matrix([
    [1,  2, 0],
    [0,  3, 0],
    [2, -4, 2]])
    >>> m.is_diagonalizable()
    True
    >>> m = Matrix(2, 2, [0, 1, 0, 0])
    >>> m
    Matrix([
    [0, 1],
    [0, 0]])
    >>> m.is_diagonalizable()
    False
    >>> m = Matrix(2, 2, [0, 1, -1, 0])
    >>> m
    Matrix([
    [ 0, 1],
    [-1, 0]])
    >>> m.is_diagonalizable()
    True
    >>> m.is_diagonalizable(True)
    False

    See Also
    ========
    is_diagonal
    diagonalize
    """
    if not self.is_square:
        return False
    res = False
    # cache intermediates on the instance; diagonalize() reuses
    # self._eigenvects when it calls this with clear_subproducts=False
    self._is_symbolic = self.is_symbolic()
    self._is_symmetric = self.is_symmetric()
    self._eigenvects = None
    #if self._is_symbolic:
    #    self._diagonalize_clear_subproducts()
    #    raise NotImplementedError("Symbolic matrices are not implemented for diagonalization yet")
    self._eigenvects = self.eigenvects(simplify=True)
    all_iscorrect = True
    for eigenval, multiplicity, vects in self._eigenvects:
        # diagonalizable iff geometric multiplicity (len(vects)) equals
        # algebraic multiplicity for every eigenvalue
        if len(vects) != multiplicity:
            all_iscorrect = False
            break
        elif reals_only and not eigenval.is_real:
            all_iscorrect = False
            break
    res = all_iscorrect
    if clear_subproducts:
        self._diagonalize_clear_subproducts()
    return res
def _diagonalize_clear_subproducts(self):
    """Drop the cached intermediates stored by is_diagonalizable()."""
    for name in ('_is_symbolic', '_is_symmetric', '_eigenvects'):
        delattr(self, name)
def jordan_cell(self, eigenval, n):
    """Return an n x n Jordan block: ``eigenval`` on the diagonal and
    ones on the superdiagonal."""
    size = int(n)
    from sympy.matrices import MutableMatrix
    cell = MutableMatrix.zeros(size)
    for d in range(size - 1):
        cell[d, d] = eigenval
        cell[d, d + 1] = 1
    # last diagonal entry has no superdiagonal neighbour
    cell[size - 1, size - 1] = eigenval
    return type(self)(cell)
def _jordan_block_structure(self):
    """Compute, for every eigenvalue, the Jordan block sizes and the chains
    of generalized eigenvectors attached to them.

    Returns a dict mapping each eigenvalue to a dict ``{blocksize: chains}``
    where every chain is a list of column vectors ending in a true
    eigenvector; jordan_cells() uses these to assemble the transformation P.
    """
    jordan_block_structures = {}
    _eigenvects = self.eigenvects()
    ev = self.eigenvals()
    if len(ev) == 0:
        raise AttributeError("could not compute the eigenvalues")
    for eigenval, multiplicity, vects in _eigenvects:
        l_jordan_chains = {}
        geometrical = len(vects)
        if geometrical == multiplicity:
            # The Jordan chains all have length 1 and consist of a single
            # vector, which is the eigenvector itself.
            chains = []
            for v in vects:
                chain = [v]
                chains.append(chain)
            l_jordan_chains[1] = chains
            jordan_block_structures[eigenval] = l_jordan_chains
        elif geometrical == 0:
            raise MatrixError("Matrix has the eigen vector with geometrical multiplicity equal zero.")
        else:
            # The block sizes are NOT determined by the algebraic and
            # geometric multiplicities alone.  The size of the largest block
            # is the smallest k with
            #     kernel((self-l*I)**k) == kernel((self-l*I)**(k+1)),
            # so compute increasing powers until the nullspace stops growing.
            from sympy.matrices import MutableMatrix
            I = MutableMatrix.eye(self.rows)
            l = eigenval
            M = (self - l*I)
            # Ms[k] = (self-l*I)**k, Ns[k] = nullspace basis of Ms[k],
            # a[k] = dim of that nullspace (a[0] = 0 since Ms[0] = I is
            # regular); kept mainly for checking/debugging.
            l_jordan_chains = {}
            chain_vectors = []
            Ms = [I]
            Ns = [[]]
            a = [0]
            smax = 0
            M_new = Ms[-1]*M
            Ns_new = M_new.nullspace()
            a_new = len(Ns_new)
            Ms.append(M_new)
            Ns.append(Ns_new)
            while a_new > a[-1]:  # as long as the nullspaces increase compute further powers
                a.append(a_new)
                M_new = Ms[-1]*M
                Ns_new = M_new.nullspace()
                a_new = len(Ns_new)
                Ms.append(M_new)
                Ns.append(Ns_new)
                smax += 1
            # smax is now the size of the biggest Jordan block for ``l``.
            # For every block size s (largest first) pick chain leaders e0
            # in kernel((self-l*I)**s) but NOT in kernel((self-l*I)**(s-1))
            # and not in already-collected chains; the rest of each chain
            # follows as e_{k+1} = (self-l*I)*e_k.  Working backwards from
            # the eigenvectors instead would require solving a singular
            # system at each step, which is why the nullspace formulation
            # is used.
            for s in reversed(xrange(1, smax + 1)):
                S = Ms[s]
                # Exclude vectors from the next-smaller kernel by appending
                # their transposes as extra equations.  S is no longer
                # square, but it stays rank deficient, which is all that
                # nullspace() needs.
                exclude_vectors = Ns[s - 1]
                for k in range(0, a[s - 1]):
                    S = S.col_join((exclude_vectors[k]).transpose())
                # Also exclude the chain vectors of the bigger blocks that
                # were already computed (hence biggest s first).  Some of
                # these equations duplicate the kernel exclusion above, but
                # linearly dependent equations do not change the result —
                # they only cost a little efficiency.
                l = len(chain_vectors)
                if l > 0:
                    for k in range(0, l):
                        old = chain_vectors[k].transpose()
                        S = S.col_join(old)
                e0s = S.nullspace()
                # The number of chain leaders equals the number of blocks
                # with this size.
                n_e0 = len(e0s)
                s_chains = []
                for i in range(0, n_e0):
                    chain = [e0s[i]]
                    for k in range(1, s):
                        v = M*chain[k - 1]
                        chain.append(v)
                    # the chain leader should appear as the last vector of
                    # its block
                    chain.reverse()
                    chain_vectors += chain
                    s_chains.append(chain)
                l_jordan_chains[s] = s_chains
            jordan_block_structures[eigenval] = l_jordan_chains
    return jordan_block_structures
def jordan_form(self, calc_transformation=True):
    r"""Return Jordan form J of current matrix.

    Also the transformation P such that

        `J = P^{-1} \cdot M \cdot P`

    and the jordan blocks forming J
    will be calculated.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix([
    ...        [ 6,  5, -2, -3],
    ...        [-3, -1,  3,  3],
    ...        [ 2,  1, -2, -3],
    ...        [-1,  1,  5,  5]])
    >>> P, J = m.jordan_form()
    >>> J
    Matrix([
    [2, 1, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 2, 1],
    [0, 0, 0, 2]])

    See Also
    ========
    jordan_cells
    """
    from sympy.matrices import diag
    # assemble J from the individual Jordan cells on the block diagonal
    P, cells = self.jordan_cells()
    J = diag(*cells)
    return P, type(self)(J)
def jordan_cells(self, calc_transformation=True):
    r"""Return a list of Jordan cells of current matrix.
    This list shape Jordan matrix J.

    If calc_transformation is specified as False, then transformation P such that

        `J = P^{-1} \cdot M \cdot P`

    will not be calculated.

    Notes
    =====

    Calculation of transformation P is not implemented yet.

    Examples
    ========

    >>> from sympy import Matrix
    >>> m = Matrix(4, 4, [
    ...  6,  5, -2, -3,
    ... -3, -1,  3,  3,
    ...  2,  1, -2, -3,
    ... -1,  1,  5,  5])
    >>> P, Jcells = m.jordan_cells()
    >>> Jcells[0]
    Matrix([
    [2, 1],
    [0, 2]])
    >>> Jcells[1]
    Matrix([
    [2, 1],
    [0, 2]])

    See Also
    ========
    jordan_form
    """
    from sympy.matrices import MutableMatrix
    n = self.rows
    structures = self._jordan_block_structure()
    cells = []
    new_cols = []
    # iterate eigenvalues in default_sort_key order, matching .diagonalize()
    for eigenval in sorted(structures.keys(), key=default_sort_key):
        chains_by_size = structures[eigenval]
        # start with the biggest block
        for size in sorted(chains_by_size.keys(), reverse=True):
            block = self.jordan_cell(eigenval, size)
            for chain in chains_by_size[size]:
                cells.append(type(self)(block))
                assert len(chain) == size
                # the chain vectors become consecutive columns of P
                new_cols.extend(chain)
    P = MutableMatrix.zeros(n)
    for j in range(n):
        P[:, j] = new_cols[j]
    return type(self)(P), cells
def _jordan_split(self, algebraical, geometrical):
    """Return a list of integers with sum equal to 'algebraical'
    and length equal to 'geometrical'"""
    base, remainder = divmod(algebraical, geometrical)
    sizes = [base]*geometrical
    # fold any remainder into the final part
    sizes[-1] += remainder
    assert sum(sizes) == algebraical
    return sizes
def has(self, *patterns):
    """Test whether any subexpression matches any of the patterns.

    Examples
    ========

    >>> from sympy import Matrix, Float
    >>> from sympy.abc import x, y
    >>> A = Matrix(((1, x), (0.2, 3)))
    >>> A.has(x)
    True
    >>> A.has(y)
    False
    >>> A.has(Float)
    True
    """
    # delegate to the elementwise has(); stop at the first match
    for element in self._mat:
        if element.has(*patterns):
            return True
    return False
def dual(self):
    """Returns the dual of a matrix, which is:

    `(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`

    Since the levicivita method is anti_symmetric for any pairwise
    exchange of indices, the dual of a symmetric matrix is the zero
    matrix. Strictly speaking the dual defined here assumes that the
    'matrix' `M` is a contravariant anti_symmetric second rank tensor,
    so that the dual is a covariant second rank tensor.
    """
    from sympy import LeviCivita
    from sympy.matrices import zeros
    M, n = self[:, :], self.rows
    work = zeros(n)
    if self.is_symmetric():
        # antisymmetry of Levi-Civita makes the dual of a symmetric
        # matrix identically zero
        return work
    # fill entries work[i, j] for i, j >= 1 from the first row of M
    for i in range(1, n):
        for j in range(1, n):
            acum = 0
            for k in range(1, n):
                acum += LeviCivita(i, j, 0, k)*M[0, k]
            work[i, j] = acum
            work[j, i] = -acum
    # fill the first row/column from the remaining components of M
    for l in range(1, n):
        acum = 0
        for a in range(1, n):
            for b in range(1, n):
                acum += LeviCivita(0, l, a, b)*M[a, b]
        acum /= 2
        work[0, l] = -acum
        work[l, 0] = acum
    return work
@classmethod
def hstack(cls, *args):
    """Return a matrix formed by joining args horizontally (i.e.
    by repeated application of row_join).

    Examples
    ========

    >>> from sympy.matrices import Matrix, eye
    >>> Matrix.hstack(eye(2), 2*eye(2))
    Matrix([
    [1, 0, 2, 0],
    [0, 1, 0, 2]])
    """
    # left-fold over row_join; empty ``args`` raises TypeError from reduce
    return reduce(cls.row_join, args)
@classmethod
def vstack(cls, *args):
    """Return a matrix formed by joining args vertically (i.e.
    by repeated application of col_join).

    Examples
    ========

    >>> from sympy.matrices import Matrix, eye
    >>> Matrix.vstack(eye(2), 2*eye(2))
    Matrix([
    [1, 0],
    [0, 1],
    [2, 0],
    [0, 2]])
    """
    # left-fold over col_join; empty ``args`` raises TypeError from reduce
    return reduce(cls.col_join, args)
def row_join(self, rhs):
    """Concatenates two matrices along self's last and rhs's first column

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(3, 1)
    >>> M.row_join(V)
    Matrix([
    [0, 0, 0, 1],
    [0, 0, 0, 1],
    [0, 0, 0, 1]])

    See Also
    ========
    row
    col_join
    """
    if self.rows != rhs.rows:
        raise ShapeError(
            "`self` and `rhs` must have the same number of rows.")
    from sympy.matrices import MutableMatrix
    # copy self into the left part and rhs into the right part
    combined = MutableMatrix.zeros(self.rows, self.cols + rhs.cols)
    combined[:, :self.cols] = self
    combined[:, self.cols:] = rhs
    return type(self)(combined)
def col_join(self, bott):
    """Concatenates two matrices along self's last and bott's first row

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(1, 3)
    >>> M.col_join(V)
    Matrix([
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0],
    [1, 1, 1]])

    See Also
    ========
    col
    row_join
    """
    if self.cols != bott.cols:
        raise ShapeError(
            "`self` and `bott` must have the same number of columns.")
    from sympy.matrices import MutableMatrix
    # copy self into the top part and bott into the bottom part
    combined = MutableMatrix.zeros(self.rows + bott.rows, self.cols)
    combined[:self.rows, :] = self
    combined[self.rows:, :] = bott
    return type(self)(combined)
def row_insert(self, pos, mti):
    """Insert one or more rows at the given row position.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(1, 3)
    >>> M.row_insert(1, V)
    Matrix([
    [0, 0, 0],
    [1, 1, 1],
    [0, 0, 0],
    [0, 0, 0]])

    See Also
    ========
    row
    col_insert
    """
    if pos == 0:
        return mti.col_join(self)
    if pos < 0:
        # negative positions count from the end, clamped at 0
        pos = max(self.rows + pos, 0)
    elif pos > self.rows:
        pos = self.rows
    if self.cols != mti.cols:
        raise ShapeError(
            "`self` and `mti` must have the same number of columns.")
    newmat = self.zeros(self.rows + mti.rows, self.cols)
    top, bottom = pos, pos + mti.rows
    newmat[:top, :] = self[:top, :]
    newmat[top:bottom, :] = mti
    newmat[bottom:, :] = self[top:, :]
    return newmat
def col_insert(self, pos, mti):
    """Insert one or more columns at the given column position.

    Examples
    ========

    >>> from sympy import zeros, ones
    >>> M = zeros(3)
    >>> V = ones(3, 1)
    >>> M.col_insert(1, V)
    Matrix([
    [0, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 0, 0]])

    See Also
    ========
    col
    row_insert
    """
    if pos == 0:
        return mti.row_join(self)
    if pos < 0:
        # negative positions count from the end, clamped at 0
        pos = max(self.cols + pos, 0)
    elif pos > self.cols:
        pos = self.cols
    if self.rows != mti.rows:
        raise ShapeError("self and mti must have the same number of rows.")
    from sympy.matrices import MutableMatrix
    newmat = MutableMatrix.zeros(self.rows, self.cols + mti.cols)
    left, right = pos, pos + mti.cols
    newmat[:, :left] = self[:, :left]
    newmat[:, left:right] = mti
    newmat[:, right:] = self[:, left:]
    return type(self)(newmat)
def replace(self, F, G, map=False):
    """Replaces Function F in Matrix entries with Function G.

    Examples
    ========

    >>> from sympy import symbols, Function, Matrix
    >>> F, G = symbols('F, G', cls=Function)
    >>> M = Matrix(2, 2, lambda i, j: F(i+j)) ; M
    Matrix([
    [F(0), F(1)],
    [F(1), F(2)]])
    >>> N = M.replace(F,G)
    >>> N
    Matrix([
    [G(0), G(1)],
    [G(1), G(2)]])
    """
    # Work on a copy and delegate to each entry's own ``replace``.
    # (``map`` shadows the builtin, but the name is part of the public
    # signature and must stay.)
    duplicate = self[:, :]
    return duplicate.applyfunc(lambda entry: entry.replace(F, G, map))
def pinv(self):
    """Calculate the Moore-Penrose pseudoinverse of the matrix.

    The Moore-Penrose pseudoinverse exists and is unique for any matrix.
    If the matrix is invertible, the pseudoinverse is the same as the
    inverse.

    Examples
    ========

    >>> from sympy import Matrix
    >>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
    Matrix([
    [-17/18, 4/9],
    [ -1/9, 1/9],
    [ 13/18, -2/9]])

    See Also
    ========

    inv
    pinv_solve

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
    """
    conj = self.H
    # Trivial case: the pseudoinverse of an all-zero matrix is its
    # conjugate transpose.
    if self.is_zero:
        return conj
    try:
        if self.rows >= self.cols:
            # Tall or square: left pseudoinverse (A^H A)^-1 A^H.
            return (conj * self).inv() * conj
        # Wide: right pseudoinverse A^H (A A^H)^-1.
        return conj * (self * conj).inv()
    except ValueError:
        # inv() failed, so the Gram matrix is singular: not full rank.
        raise NotImplementedError('Rank-deficient matrices are not yet '
                                  'supported.')
def pinv_solve(self, B, arbitrary_matrix=None):
    """Solve Ax = B using the Moore-Penrose pseudoinverse.

    There may be zero, one, or infinite solutions.  If one solution
    exists, it will be returned.  If infinite solutions exist, one will
    be returned based on the value of arbitrary_matrix.  If no solutions
    exist, the least-squares solution is returned.

    Parameters
    ==========

    B : Matrix
        The right hand side of the equation to be solved for.  Must have
        the same number of rows as matrix A.
    arbitrary_matrix : Matrix
        If the system is underdetermined (e.g. A has more columns than
        rows), infinite solutions are possible, in terms of an arbitrary
        matrix.  This parameter may be set to a specific matrix to use
        for that purpose; if so, it must be the same shape as x, with as
        many rows as matrix A has columns, and as many columns as matrix
        B.  If left as None, an appropriate matrix containing dummy
        symbols in the form of ``wn_m`` will be used, with n and m being
        row and column position of each symbol.

    Returns
    =======

    x : Matrix
        The matrix that will satisfy Ax = B.  Will have as many rows as
        matrix A has columns, and as many columns as matrix B.

    Examples
    ========

    >>> from sympy import Matrix
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> B = Matrix([7, 8])
    >>> A.pinv_solve(B)
    Matrix([
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
    [-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
    >>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
    Matrix([
    [-55/18],
    [ 1/9],
    [ 59/18]])

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv

    Notes
    =====

    This may return either exact solutions or least squares solutions.
    To determine which, check ``A * A.pinv() * B == B``.  It will be
    True if exact solutions exist, and False if only a least-squares
    solution exists.  Be aware that the left hand side of that equation
    may need to be simplified to correctly compare to the right hand
    side.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
    """
    from sympy.matrices import eye
    pinv_A = self.pinv()
    if arbitrary_matrix is None:
        # Default free-parameter matrix: dummy symbols named w<row>_<col>.
        nrows, ncols = self.cols, B.cols
        w = symbols('w:{0}_:{1}'.format(nrows, ncols), cls=Dummy)
        arbitrary_matrix = self.__class__(ncols, nrows, w).T
    # General solution: particular part plus the null-space contribution.
    return pinv_A * B + (eye(self.cols) - pinv_A * self) * arbitrary_matrix
def classof(A, B):
    """
    Get the type of the result when combining matrices of different types.

    Currently the strategy is that immutability is contagious.

    Examples
    ========

    >>> from sympy import Matrix, ImmutableMatrix
    >>> from sympy.matrices.matrices import classof
    >>> M = Matrix([[1, 2], [3, 4]]) # a Mutable Matrix
    >>> IM = ImmutableMatrix([[1, 2], [3, 4]])
    >>> classof(M, IM)
    <class 'sympy.matrices.immutable.ImmutableMatrix'>
    """
    # First preference: the class with the higher declared priority wins.
    try:
        return A.__class__ if A._class_priority > B._class_priority else B.__class__
    except Exception:
        pass
    # Fallback: a plain numpy array defers to the other operand's class.
    try:
        import numpy
    except Exception:
        pass
    else:
        if isinstance(A, numpy.ndarray):
            return B.__class__
        if isinstance(B, numpy.ndarray):
            return A.__class__
    raise TypeError("Incompatible classes %s, %s" % (A.__class__, B.__class__))
def a2idx(j, n=None):
    """Return integer after making positive and validating against n."""
    if type(j) is not int:
        # Accept anything supporting the index protocol (numpy ints, ...).
        try:
            j = j.__index__()
        except AttributeError:
            raise IndexError("Invalid index a[%r]" % (j, ))
    if n is None:
        return int(j)
    if j < 0:
        # Negative indices count from the end.
        j += n
    if 0 <= j < n:
        return int(j)
    raise IndexError("Index out of range: a[%s]" % (j, ))
| bsd-3-clause |
PurpleBooth/python-vm | venv/lib/python2.7/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
# Originals from ``distutils.msvc9compiler`` saved by
# patch_for_specialized_compiler() so the patched functions can fall back
# to stock behavior.  Non-empty means the patch has been applied.
unpatched = dict()

def patch_for_specialized_compiler():
    """
    Patch functions in distutils.msvc9compiler to use the standalone compiler
    build for Python (Windows only). Fall back to original behavior when the
    standalone compiler is not available.
    """
    if 'distutils' not in globals():
        # The module isn't available to be patched
        return
    if unpatched:
        # Already patched
        return
    # Remember every original attribute before replacing the two lookups.
    unpatched.update(vars(distutils.msvc9compiler))
    distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
    distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
def find_vcvarsall(version):
    """Locate vcvarsall.bat for Microsoft's standalone "VC for Python"
    package: check the per-user registry key first, then the all-user
    (Wow6432Node) key, and fall back to the stock distutils lookup when
    neither exists or the batch file is missing.
    """
    Reg = distutils.msvc9compiler.Reg
    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
    productdir = None
    # '' = per-user install; 'Wow6432Node\\' = all-user install on 64-bit.
    for node in ('', 'Wow6432Node\\'):
        try:
            productdir = Reg.get_value(VC_BASE % (node, version), "installdir")
            break
        except KeyError:
            pass
    if productdir:
        import os
        candidate = os.path.join(productdir, "vcvarsall.bat")
        if os.path.isfile(candidate):
            return candidate
    # Not installed (or batch file absent): defer to the saved original.
    return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
    """Run the original query_vcvarsall, rewriting the "vcvarsall.bat not
    found" failure into a friendlier message (with a download link for
    VC 9 / Python 2.7).  Any other error is re-raised unchanged.
    """
    try:
        return unpatched['query_vcvarsall'](version, *args, **kwargs)
    except distutils.errors.DistutilsPlatformError as exc:
        if not (exc and "vcvarsall.bat" in exc.args[0]):
            raise
        message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
        if int(version) == 9:
            # This redirection link is maintained by Microsoft.
            # Contact vspython@microsoft.com if it needs updating.
            raise distutils.errors.DistutilsPlatformError(
                message + ' Get it from http://aka.ms/vcpython27'
            )
        raise distutils.errors.DistutilsPlatformError(message)
| mit |
cgstudiomap/cgstudiomap | main/eggs/Django-1.9-py2.7.egg/django/contrib/auth/middleware.py | 258 | 5718 | from django.contrib import auth
from django.contrib.auth import load_backend
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
def get_user(request):
    """Return the request's user, resolving it at most once per request.

    The resolved user is memoized on ``request._cached_user`` so repeated
    accesses do not hit ``auth.get_user`` (and the session) again.
    """
    try:
        return request._cached_user
    except AttributeError:
        request._cached_user = auth.get_user(request)
        return request._cached_user
class AuthenticationMiddleware(object):
    """Attach a lazily-evaluated ``request.user`` to every request."""

    def process_request(self, request):
        # Session middleware must have run first: resolving the user reads
        # the session.
        assert hasattr(request, 'session'), (
            "The Django authentication middleware requires session middleware "
            "to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware' before "
            "'django.contrib.auth.middleware.AuthenticationMiddleware'."
        )
        # SimpleLazyObject defers the (potentially costly) lookup until the
        # first attribute access on request.user.
        request.user = SimpleLazyObject(lambda: get_user(request))
class SessionAuthenticationMiddleware(object):
    """
    Formerly, a middleware for invalidating a user's sessions that don't
    correspond to the user's current session authentication hash. However, it
    caused the "Vary: Cookie" header on all responses.

    Now a backwards compatibility shim that enables session verification in
    auth.get_user() if this middleware is in MIDDLEWARE_CLASSES.
    """

    def process_request(self, request):
        # Intentionally a no-op: merely listing this class in
        # MIDDLEWARE_CLASSES is what switches on session verification.
        pass
class RemoteUserMiddleware(object):
    """
    Middleware for utilizing Web-server-provided authentication.

    If request.user is not authenticated, then this middleware attempts to
    authenticate the username passed in the ``REMOTE_USER`` request header.
    If authentication is successful, the user is automatically logged in to
    persist the user in the session.

    The header used is configurable and defaults to ``REMOTE_USER``. Subclass
    this class and change the ``header`` attribute if you need to use a
    different header.
    """

    # Name of request header to grab username from. This will be the key as
    # used in the request.META dictionary, i.e. the normalization of headers to
    # all uppercase and the addition of "HTTP_" prefix apply.
    header = "REMOTE_USER"
    # When True, a previously authenticated user is logged out as soon as
    # the header disappears (PersistentRemoteUserMiddleware disables this).
    force_logout_if_no_header = True

    def process_request(self, request):
        """Authenticate request.user from the configured request header."""
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The Django remote user auth middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE_CLASSES setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        try:
            username = request.META[self.header]
        except KeyError:
            # If specified header doesn't exist then remove any existing
            # authenticated remote-user, or return (leaving request.user set to
            # AnonymousUser by the AuthenticationMiddleware).
            if self.force_logout_if_no_header and request.user.is_authenticated():
                self._remove_invalid_user(request)
            return
        # If the user is already authenticated and that user is the user we are
        # getting passed in the headers, then the correct user is already
        # persisted in the session and we don't need to continue.
        if request.user.is_authenticated():
            if request.user.get_username() == self.clean_username(username, request):
                return
            else:
                # An authenticated user is associated with the request, but
                # it does not match the authorized user in the header.
                self._remove_invalid_user(request)
        # We are seeing this user for the first time in this session, attempt
        # to authenticate the user.
        user = auth.authenticate(remote_user=username)
        if user:
            # User is valid.  Set request.user and persist user in the session
            # by logging the user in.
            request.user = user
            auth.login(request, user)

    def clean_username(self, username, request):
        """
        Allows the backend to clean the username, if the backend defines a
        clean_username method.
        """
        backend_str = request.session[auth.BACKEND_SESSION_KEY]
        backend = auth.load_backend(backend_str)
        try:
            username = backend.clean_username(username)
        except AttributeError:  # Backend has no clean_username method.
            pass
        return username

    def _remove_invalid_user(self, request):
        """
        Removes the current authenticated user in the request which is invalid
        but only if the user is authenticated via the RemoteUserBackend.
        """
        try:
            stored_backend = load_backend(
                request.session.get(auth.BACKEND_SESSION_KEY, ''))
        except ImportError:
            # backend failed to load
            auth.logout(request)
        else:
            if isinstance(stored_backend, RemoteUserBackend):
                auth.logout(request)
class PersistentRemoteUserMiddleware(RemoteUserMiddleware):
    """
    Middleware for Web-server provided authentication on logon pages.

    Like RemoteUserMiddleware but keeps the user authenticated even if
    the header (``REMOTE_USER``) is not found in the request. Useful
    for setups when the external authentication via ``REMOTE_USER``
    is only expected to happen on some "logon" URL and the rest of
    the application wants to use Django's authentication mechanism.
    """

    # Don't log the user out when the header is missing; everything else is
    # inherited unchanged from RemoteUserMiddleware.
    force_logout_if_no_header = False
| agpl-3.0 |
lihui7115/ChromiumGStreamerBackend | tools/telemetry/third_party/typ/typ/fakes/tests/test_result_server_fake_test.py | 81 | 1298 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typ.fakes import test_result_server_fake
from typ import Host
class TestResultServerFakeTest(unittest.TestCase):
    """Exercises the fake test-result HTTP server used by typ's own tests."""

    def test_basic_upload(self):
        """Start the fake server, POST one payload, verify it was recorded."""
        host = Host()
        server = None
        posts = []
        try:
            server = test_result_server_fake.start()
            # NOTE(review): computed before the `if server` guard below; if
            # start() ever returned None this line itself would raise.
            url = 'http://%s:%d/testfile/upload' % server.server_address
            if server:
                # The response object is not inspected; only the server-side
                # record of the POST is asserted on below.
                resp = host.fetch(url, 'foo=bar')
        finally:
            # Always stop the server; stop() returns the recorded requests.
            if server:
                posts = server.stop()
        self.assertEqual(posts, [('post', '/testfile/upload',
                                  'foo=bar'.encode('utf8'))])
        self.assertNotEqual(server.log.getvalue(), '')
| bsd-3-clause |
SPKian/Testing2 | erpnext/config/learn.py | 9 | 4470 | from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the "Learn" desktop section: labelled groups of help videos.

    Each group is a ``{"label": ..., "items": [...]}`` dict; every item is
    a help-type entry pointing at a YouTube tutorial.
    """
    def video(label, youtube_id):
        # Build one help-video entry.
        return {"type": "help", "label": label, "youtube_id": youtube_id}

    return [
        {
            "label": _("General"),
            "items": [
                video(_("Navigating"), "YDoI2DF4Lmc"),
                video(_("Setup Wizard"), "oIOf_zCFWKQ"),
                video(_("Customizing Forms"), "pJhL9mmxV_U"),
            ]
        },
        {
            "label": _("Setup"),
            "items": [
                video(_("Data Import and Export"), "6wiriRKPhmg"),
                video(_("Opening Stock Balance"), "0yPgrtfeCTs"),
                video(_("Setting up Email"), "YFYe0DrB95o"),
                video(_("Printing and Branding"), "cKZHcx1znMc"),
                video(_("Users and Permissions"), "fnBoRhBrwR4"),
                video(_("Workflow"), "yObJUg9FxFs"),
            ]
        },
        {
            "label": _("Accounts"),
            "items": [
                video(_("Chart of Accounts"), "DyR-DST-PyA"),
                video(_("Setting up Taxes"), "nQ1zZdPgdaQ"),
                video(_("Opening Accounting Balance"), "kdgM20Q-q68"),
            ]
        },
        {
            "label": _("CRM"),
            "items": [
                video(_("Lead to Quotation"), "TxYX4r4JAKA"),
                video(_("Newsletters"), "muLKsCrrDRo"),
            ]
        },
        {
            "label": _("Selling"),
            "items": [
                video(_("Customer and Supplier"), "anoGi_RpQ20"),
                video(_("Sales Order to Payment"), "7AMq4lqkN4A"),
                video(_("Point-of-Sale"), "4WkelWkbP_c"),
            ]
        },
        {
            "label": _("Stock"),
            "items": [
                video(_("Items and Pricing"), "qXaEwld4_Ps"),
                video(_("Item Variants"), "OGBETlCzU5o"),
                video(_("Opening Stock Balance"), "0yPgrtfeCTs"),
                video(_("Making Stock Entries"), "Njt107hlY3I"),
                video(_("Serialized Inventory"), "gvOVlEwFDAk"),
                video(_("Batch Inventory"), "J0QKl7ABPKM"),
                video(_("Managing Subcontracting"), "ThiMCC2DtKo"),
            ]
        },
        {
            "label": _("Buying"),
            "items": [
                video(_("Customer and Supplier"), "anoGi_RpQ20"),
                video(_("Material Request to Purchase Order"), "4TN9kPyfIqM"),
                video(_("Purchase Order to Payment"), "EK65tLdVUDk"),
                video(_("Managing Subcontracting"), "ThiMCC2DtKo"),
            ]
        },
        {
            "label": _("Manufacturing"),
            "items": [
                video(_("Bill of Materials"), "hDV0c1OeWLo"),
                video(_("Production Planning Tool"), "CzatSl4zJ2Y"),
                video(_("Production Order"), "ZotgLyp2YFY"),
            ]
        },
        {
            "label": _("Human Resource"),
            "items": [
                video(_("Setting up Employees"), "USfIUdZlUhw"),
                video(_("Leave Management"), "fc0p_AXebc8"),
                video(_("Expense Claims"), "5SZHJF--ZFY"),
                video(_("Processing Payroll"), "apgE-f25Rm0"),
            ]
        },
        {
            "label": _("Projects"),
            "items": [
                video(_("Managing Projects"), "egxIGwtoKI4"),
            ]
        },
    ]
| agpl-3.0 |
yoazmenda/Hearthstone_deck_builder | tests/agents/trade/play_tests.py | 9 | 4964 | import unittest
from hearthbreaker.cards import ArgentSquire, DireWolfAlpha, HarvestGolem, BloodfenRaptor, MagmaRager, Wisp, Ysera
from hearthbreaker.cards.spells.neutral import TheCoin
from tests.agents.trade.test_helpers import TestHelpers
from hearthbreaker.agents.trade.possible_play import PossiblePlays
from tests.agents.trade.test_case_mixin import TestCaseMixin
class TestTradeAgentPlayTests(TestCaseMixin, unittest.TestCase):
    """Checks which minions the trade agent chooses to play each turn."""

    def test_simple_plays(self):
        # The agent plays what it can afford: 1-mana minion on turn 1,
        # then the 2-mana minion once turn 2's mana is available.
        game = TestHelpers().make_game()
        self.set_hand(game, 0, ArgentSquire(), DireWolfAlpha(), HarvestGolem())
        game.play_single_turn()
        self.assert_minions(game.players[0], "Argent Squire")
        game.play_single_turn()
        game.play_single_turn()
        self.assert_minions(game.players[0], "Argent Squire", "Dire Wolf Alpha")

    def test_will_play_biggest(self):
        # With 1 mana and a choice, the agent prefers the biggest playable card.
        game = TestHelpers().make_game()
        game.players[0].hand = self.make_cards(game.current_player, ArgentSquire(), ArgentSquire(), DireWolfAlpha())
        game.players[0].mana = 1
        game.players[0].max_mana = 1
        game.play_single_turn()
        self.assert_minions(game.players[0], "Dire Wolf Alpha")

    def test_will_play_multiple(self):
        # Two 1-mana minions fit into... the available mana pool this turn.
        game = TestHelpers().make_game()
        game.players[0].hand = self.make_cards(game.current_player, ArgentSquire(), ArgentSquire(), ArgentSquire())
        game.players[0].mana = 1
        game.players[0].max_mana = 1
        game.play_single_turn()
        self.assert_minions(game.players[0], "Argent Squire", "Argent Squire")

    def test_will_play_multiple_correct_order(self):
        # With 3 mana, the costlier minion is committed first, then filler.
        game = TestHelpers().make_game()
        game.players[0].hand = self.make_cards(game.current_player, ArgentSquire(), ArgentSquire(), ArgentSquire(),
                                               HarvestGolem())
        game.players[0].mana = 3
        game.players[0].max_mana = 3
        game.play_single_turn()
        self.assert_minions(game.players[0], "Harvest Golem", "Argent Squire")

    def test_will_use_entire_pool(self):
        # Spending all 3 mana on two 2-drops... the agent fills the pool
        # rather than playing the single 3-cost minion.
        game = TestHelpers().make_game()
        game.players[0].hand = self.make_cards(game.current_player, DireWolfAlpha(), DireWolfAlpha(), DireWolfAlpha(),
                                               HarvestGolem())
        game.players[0].mana = 3
        game.players[0].max_mana = 3
        game.play_single_turn()
        self.assert_minions(game.players[0], "Dire Wolf Alpha", "Dire Wolf Alpha")

    def test_will_play_three_cards(self):
        # 0 + 1 + 2 mana cards all fit into a 3-mana turn.
        game = TestHelpers().make_game()
        self.set_hand(game, 0, Wisp(), ArgentSquire(), DireWolfAlpha())
        self.set_mana(game, 0, 3)
        game.play_single_turn()
        self.assert_minions(game.players[0], "Wisp", "Argent Squire", "Dire Wolf Alpha")
class TestTradeAgentPlayCoinTests(TestCaseMixin, unittest.TestCase):
    """Checks how the trade agent uses The Coin in its possible plays."""

    def test_coin(self):
        # Coin is used when it unlocks a better play (2-drop on turn 1).
        game = self.make_game()
        cards = self.make_cards(game.current_player, ArgentSquire(), BloodfenRaptor(), TheCoin())
        possible_plays = PossiblePlays(cards, 1)
        play = possible_plays.plays()[0]
        names = [c.name for c in play.cards]
        self.assertEqual(names, ["The Coin", "Bloodfen Raptor"])

    def test_coin_save(self):
        # Coin is held back when the extra mana still wouldn't pay for the
        # 3-cost minion; the 1-drop is played instead.
        game = self.make_game()
        cards = self.make_cards(game.current_player, ArgentSquire(), MagmaRager(), TheCoin())
        possible_plays = PossiblePlays(cards, 1)
        play = possible_plays.plays()[0]
        names = [c.name for c in play.cards]
        self.assertEqual(names, ["Argent Squire"])
class TestTradeAgentHeroPowerTests(TestCaseMixin, unittest.TestCase):
    """Checks when the trade agent uses (or withholds) the hero power."""

    def test_will_use_hero_power_with_empty_hand(self):
        # With nothing to play, the hero power is the chosen play.
        game = TestHelpers().make_game()
        self.set_hand(game, 0)
        self.set_mana(game, 0, 10)
        possible = PossiblePlays([], 10)
        play = possible.plays()[0]
        self.assertEqual(play.cards[0].name, "Hero Power")
        game.play_single_turn()
        self.assert_minions(game.players[0], "War Golem")

    def test_wont_kill_self_with_hero_power(self):
        # At 1 health the (self-damaging) hero power must not be used.
        game = TestHelpers().make_game()
        self.set_hand(game, 0)
        self.set_mana(game, 0, 2)
        game.players[0].hero.health = 1
        game.play_single_turn()
        self.assert_minions(game.players[0])
        self.assertEqual(game.players[0].hero.health, 1)

    def test_will_hero_power_first_if_inevitable(self):
        # When both fit in the mana pool anyway, hero power goes first.
        game = self.make_game()
        cards = self.make_cards(game.current_player, DireWolfAlpha())
        possible = PossiblePlays(cards, 10)
        play = possible.plays()[0]
        self.assertEqual(play.first_card().name, "Hero Power")

    def test_will_not_hero_power_if_not_inevitable(self):
        # An expensive card takes priority over the hero power.
        game = self.make_game()
        cards = self.make_cards(game.current_player, Ysera())
        possible = PossiblePlays(cards, 10)
        play = possible.plays()[0]
        self.assertEqual(play.first_card().name, "Ysera")
| mit |
bsmrstu-warriors/Moytri--The-Drone-Aider | Lib/site-packages/numpy/lib/tests/test_index_tricks.py | 53 | 4333 | from numpy.testing import *
import numpy as np
from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where,
ndenumerate, fill_diagonal, diag_indices,
diag_indices_from, s_, index_exp )
class TestUnravelIndex(TestCase):
    """Tests for ``numpy.unravel_index``."""

    def test_basic(self):
        # Flat index -> multi-dimensional coordinates.
        assert_equal(unravel_index(2, (2, 2)), (1, 0))
        assert_equal(unravel_index(254, (17, 94)), (2, 66))
        # A flat index past the end of the array must be rejected.
        assert_raises(ValueError, unravel_index, 4, (2, 2))
class TestGrid(TestCase):
    """Tests for ``numpy.mgrid`` grid construction."""

    def test_basic(self):
        # A complex step (10j) means "number of points, endpoints included";
        # a real step (0.1) means "spacing, end point excluded".
        a = mgrid[-1:1:10j]
        b = mgrid[-1:1:0.1]
        assert(a.shape == (10,))
        assert(b.shape == (20,))
        assert(a[0] == -1)
        assert_almost_equal(a[-1], 1)
        assert(b[0] == -1)
        assert_almost_equal(b[1]-b[0], 0.1, 11)
        assert_almost_equal(b[-1], b[0]+19*0.1, 11)
        assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)

    def test_linspace_equivalence(self):
        # mgrid's 50j complex step should reproduce np.linspace's points.
        y, st = np.linspace(2, 10, retstep=1)
        assert_almost_equal(st, 8/49.0)
        assert_array_almost_equal(y, mgrid[2:10:50j], 13)

    def test_nd(self):
        # Two-dimensional grids, complex- and real-stepped.
        c = mgrid[-1:1:10j, -2:2:10j]
        d = mgrid[-1:1:0.1, -2:2:0.2]
        assert(c.shape == (2, 10, 10))
        assert(d.shape == (2, 20, 20))
        assert_array_equal(c[0][0, :], -ones(10, 'd'))
        assert_array_equal(c[1][:, 0], -2*ones(10, 'd'))
        assert_array_almost_equal(c[0][-1, :], ones(10, 'd'), 11)
        assert_array_almost_equal(c[1][:, -1], 2*ones(10, 'd'), 11)
        assert_array_almost_equal(d[0, 1, :]-d[0, 0, :], 0.1*ones(20, 'd'), 11)
        assert_array_almost_equal(d[1, :, 1]-d[1, :, 0], 0.2*ones(20, 'd'), 11)
class TestConcatenator(TestCase):
    """Tests for the ``r_`` concatenation helper."""

    def test_1d(self):
        assert_array_equal(r_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
        b = ones(5)
        c = r_[b, 0, 0, b]
        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])

    def test_mixed_type(self):
        # Mixing a float scalar with an int range upcasts to float64.
        g = r_[10.1, 1:10]
        assert(g.dtype == 'f8')

    def test_more_mixed_type(self):
        g = r_[-10.1, array([1]), array([2, 3, 4]), 10.0]
        assert(g.dtype == 'f8')

    def test_2d(self):
        # NOTE(review): `rand` comes from this module's star import of
        # numpy.testing -- confirm it is still exported there.
        b = rand(5, 5)
        c = rand(5, 5)
        d = r_['1', b, c]  # append columns
        assert(d.shape == (5, 10))
        assert_array_equal(d[:, :5], b)
        assert_array_equal(d[:, 5:], c)
        d = r_[b, c]
        assert(d.shape == (10, 5))
        assert_array_equal(d[:5, :], b)
        assert_array_equal(d[5:, :], c)
class TestNdenumerate(TestCase):
    """Tests for ``numpy.ndenumerate`` (index, value) iteration."""

    def test_basic(self):
        arr = array([[1, 2], [3, 4]])
        expected = [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]
        assert_equal(list(ndenumerate(arr)), expected)
class TestIndexExpression(TestCase):
    """``s_`` and ``index_exp`` must build slices usable for indexing."""

    def test_regression_1(self):
        # ticket #1196
        arr = np.arange(2)
        assert_equal(arr[:-1], arr[s_[:-1]])
        assert_equal(arr[:-1], arr[index_exp[:-1]])

    def test_simple_1(self):
        arr = np.random.rand(4, 5, 6)
        direct = arr[:, :3, [1, 2]]
        assert_equal(direct, arr[index_exp[:, :3, [1, 2]]])
        assert_equal(direct, arr[s_[:, :3, [1, 2]]])
def test_fill_diagonal():
    """Yield nose-style assertion tuples exercising ``fill_diagonal``."""
    # 2-d case: only the main diagonal gets the fill value.
    mat = zeros((3, 3), int)
    fill_diagonal(mat, 5)
    expected = array([[5, 0, 0],
                      [0, 5, 0],
                      [0, 0, 5]])
    yield (assert_array_equal, mat, expected)
    # The same function can operate on a 4-d array:
    mat4 = zeros((3, 3, 3, 3), int)
    fill_diagonal(mat4, 4)
    diag = array([0, 1, 2])
    yield (assert_equal, where(mat4 != 0), (diag, diag, diag, diag))
def test_diag_indices():
    """Yield nose-style assertion tuples exercising ``diag_indices``."""
    # 2-d: the returned indices address the main diagonal.
    di = diag_indices(4)
    mat = array([[1, 2, 3, 4],
                 [5, 6, 7, 8],
                 [9, 10, 11, 12],
                 [13, 14, 15, 16]])
    mat[di] = 100
    yield (assert_array_equal, mat,
           array([[100, 2, 3, 4],
                  [5, 100, 7, 8],
                  [9, 10, 100, 12],
                  [13, 14, 15, 100]]))
    # 3-d: diag_indices(2, 3) addresses the space diagonal of a 2x2x2 cube.
    cube = zeros((2, 2, 2), int)
    cube[diag_indices(2, 3)] = 1
    yield (assert_array_equal, cube,
           array([[[1, 0],
                   [0, 0]],
                  [[0, 0],
                   [0, 1]]]))
def test_diag_indices_from():
    """diag_indices_from must return row/col ranges matching the array."""
    mat = np.random.random((4, 4))
    rows, cols = diag_indices_from(mat)
    assert_array_equal(rows, np.arange(4))
    assert_array_equal(cols, np.arange(4))
# Nose-style entry point: run this module's test suite when executed
# directly (run_module_suite comes from the numpy.testing star import).
if __name__ == "__main__":
    run_module_suite()
| gpl-3.0 |
shiora/The-Perfect-Pokemon-Team-Balancer | libs/env/Lib/site-packages/pip/_vendor/pkg_resources.py | 160 | 98660 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys
import os
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
from pkgutil import get_importer
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
try:
basestring
next = lambda o: o.next()
from cStringIO import StringIO as BytesIO
except NameError:
basestring = str
from io import BytesIO
def execfile(fn, globs=None, locs=None):
    # Python 3 replacement for the removed builtin ``execfile``: compile
    # and execute the file's contents in the given globals/locals
    # namespaces (defaulting to this module's globals).
    if globs is None:
        globs = globals()
    if locs is None:
        locs = globs
    exec(compile(open(fn).read(), fn, 'exec'), globs, locs)
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
def _bypass_ensure_directory(name, mode=0x1FF):  # 0777
    # Sandbox-bypassing version of ensure_directory(): recursively create
    # the parent directories of ``name`` using the captured ``mkdir``.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    parent, leaf = split(name)
    if parent and leaf and not isdir(parent):
        _bypass_ensure_directory(parent)
        mkdir(parent, mode)
# Maps module-global state-variable names to their serialization kind
# ('dict', 'object', 'none', ...), as registered via _declare_state().
_state_vars = {}

def _declare_state(vartype, **kw):
    # Install each value as a module global and record how to (de)serialize it.
    module_globals = globals()
    for name, val in kw.items():
        module_globals[name] = val
        _state_vars[name] = vartype

def __getstate__():
    # Snapshot every registered state variable via its _sget_<kind> helper.
    module_globals = globals()
    return dict(
        (name, module_globals['_sget_' + kind](module_globals[name]))
        for name, kind in _state_vars.items()
    )

def __setstate__(state):
    # Restore each registered state variable via its _sset_<kind> helper.
    module_globals = globals()
    for name, val in state.items():
        module_globals['_sset_' + _state_vars[name]](name, module_globals[name], val)
    return state

def _sget_dict(val):
    return val.copy()

def _sset_dict(key, ob, state):
    ob.clear()
    ob.update(state)

def _sget_object(val):
    return val.__getstate__()

def _sset_object(key, ob, state):
    ob.__setstate__(state)

def _sget_none(*args):
    # 'none'-kind state is neither saved nor restored.
    return None

_sset_none = _sget_none
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know
    the version of Mac OS X that we are *running*.  To allow usage of
    packages that explicitly require a newer version of Mac OS X, we must
    also know the current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    match = macosVersionString.match(plat)
    if sys.platform == "darwin" and match is not None:
        try:
            running = '.'.join(_macosx_vers()[:2])
            plat = 'macosx-%s-%s' % (running, match.group(3))
        except ValueError:
            # not Mac OS X after all; keep the build platform string
            pass
    return plat
# Explicit public API of this module; grouped by purpose below.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',

    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',

    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',

    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra',
    'ExtractionError',

    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',

    # filesystem utilities
    'ensure_directory', 'normalize_path',

    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',

    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',

    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # Include the args so logged failures are self-describing.
        return self.__class__.__name__+repr(self.args)

class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version"""

class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry of provider factories keyed by module __loader__ type;
# populated via register_loader_type().
_provider_factories = {}

# Major.minor Python version string, e.g. '2.7'.
PY_MAJOR = sys.version[:3]

# Distribution "precedence" codes: larger means preferred when several
# distributions of the same project are found.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.

    A later registration for the same `loader_type` replaces the earlier one.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Resolve the requirement to an installed distribution.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # Import the module on first use, then adapt its loader to a provider.
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    # The mutable default argument is deliberate: it memoizes the OS
    # version components for the life of the process.
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        if version == '':
            # fallback for MacPorts: read the system version plist directly
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist) and hasattr(plistlib, 'readPlist'):
                plist_content = plistlib.readPlist(plist)
                if 'ProductVersion' in plist_content:
                    version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]

def _macosx_arch(machine):
    # Normalize the PowerPC machine names to the 'ppc' platform tag.
    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions
    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform
    plat = get_platform()
    # On Mac, normalize a non-'macosx-' result into the canonical
    # 'macosx-<major>.<minor>-<arch>' form used by egg filenames.
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for the two historical Mac platform-tag spellings:
#   macosx-<major>.<minor>-<arch>   (current, e.g. 'macosx-10.6-x86_64')
#   darwin-<maj>.<min>.<patch>-<arch>  (pre-setuptools-0.6 legacy eggs)
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    # Unknown platforms and exact matches are always considered compatible.
    if provided is None or required is None or provided == required:
        return True
    reqMac = macosVersionString.match(required)
    if not reqMac:
        # XXX Linux and other platforms' special cases should go here
        return False
    # Mac OS X special cases from here on.
    provMac = macosVersionString.match(provided)
    if not provMac:
        # Backwards compatibility for eggs built before setuptools 0.6,
        # which used the 'darwin-...' designation instead of 'macosx-...'.
        provDarwin = darwinVersionString.match(provided)
        if not provDarwin:
            return False # egg isn't macosx or legacy darwin
        dversion = int(provDarwin.group(1))
        macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
        #import warnings
        #warnings.warn("Mac eggs should be rebuilt to "
        #    "use the macosx designation instead of darwin.",
        #    category=DeprecationWarning)
        return (dversion == 7 and macosversion >= "10.3"
            or dversion == 8 and macosversion >= "10.4")
    # Same major version and machine type?
    if provMac.group(1) != reqMac.group(1) \
            or provMac.group(3) != reqMac.group(3):
        return False
    # The required OS minor update must be >= the provided one.
    return int(provMac.group(2)) <= int(reqMac.group(2))
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script in the *caller's* global namespace: everything except
    # __name__ is wiped first so the script executes as if it were a program.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Normalize in two coercion steps: string -> Requirement -> Distribution.
    if isinstance(dist, basestring):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    # Resolve the distribution first, then delegate to its entry-point loader.
    distribution = get_distribution(dist)
    return distribution.load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    # Resolve the distribution first, then delegate the map lookup to it.
    distribution = get_distribution(dist)
    return distribution.get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    # Resolve the distribution first, then delegate the lookup to it.
    distribution = get_distribution(dist)
    return distribution.get_entry_info(group, name)
class IMetadataProvider:
    """Interface for objects exposing a distribution's metadata files.
    NOTE: the methods below are interface *declarations* only -- they are
    deliberately written without ``self`` and have no bodies beyond their
    docstrings; concrete providers implement them all.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # As in IMetadataProvider, these are interface declarations only (no
    # ``self``, no implementation); NullProvider and its subclasses supply
    # the concrete behavior.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        self.entries = []       # path entries, in order (mirrors sys.path)
        self.entry_keys = {}    # path entry -> list of project keys found there
        self.by_key = {}        # project key -> the active Distribution
        self.callbacks = []     # subscribers notified when a dist is added
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        # Register the entry even if it yields no distributions.
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # The active version does not satisfy the requested range.
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Execute in the *caller's* global namespace, keeping only __name__.
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            if item not in self.entry_keys:
                # workaround a cache issue
                continue
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set. If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        # Track the project key under both the explicit entry and the dist's
        # own location, so iteration finds it from either path item.
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        #      "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            # Push this dist's own requirements (reversed to keep pop(0)
            # processing them in declaration order).
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self, plugin_env, full_env=None, installer=None,
            fallback=True):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions)  # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve against a scratch copy so failures don't mutate this set.
        shadow_set = self.__class__([])
        list(map(shadow_set.add, self)) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError:
                    v = sys.exc_info()[1]
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    list(map(shadow_set.add, resolvees))
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        # Immediately replay already-active distributions to the subscriber.
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Notify every subscriber that `dist` just became active.
        for callback in self.callbacks:
            callback(dist)
    def __getstate__(self):
        # Pickle copies of the mutable state so later mutation of this
        # working set doesn't alter a previously captured state.
        return (
            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
            self.callbacks[:]
        )
    def __setstate__(self, e_k_b_c):
        entries, keys, by_key, callbacks = e_k_b_c
        self.entries = entries[:]
        self.entry_keys = keys.copy()
        self.by_key = by_key.copy()
        self.callbacks = callbacks[:]
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    # NOTE(review): the `platform` and `python` defaults are evaluated once,
    # at class-definition (import) time, not per call.  This matches the
    # historical behavior; confirm before changing.
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        self._distmap = {}    # project key -> list of Distributions
        self._cache = {}      # project key -> same list, kept sorted
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Retry with the canonical (lowercased) project key.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
        if project_name not in self._cache:
            # Cache the distmap list, sorted newest-first.
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)
        return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                # Keep any cached list sorted after the insertion.
                if dist.key in self._cache:
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        # self[req.key] is newest-first, so the first match is the best one.
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # Start from an unfiltered environment so nothing is rejected.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility (pre-0.6 name)
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # Raised (with the attributes above filled in) by
    # ResourceManager.extraction_error().
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extraction; None means use get_default_cache().
    extraction_path = None
    def __init__(self):
        self.cached_files = {}  # target paths handed out by get_cache_path()
    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)
    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )
    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )
    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )
    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )
    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wrap the currently-active exception in an ExtractionError that
        # carries the manager, cache path, and original error as attributes.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`
        The parent directory of the resulting path will be created if it does
        not already exist. `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension. `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.
        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        # NOTE(review): bare ``except`` converts *any* exception -- including
        # KeyboardInterrupt/SystemExit -- into an ExtractionError; consider
        # ``except Exception`` unless callers rely on the broad catch.
        except:
            self.extraction_error()
        self._warn_unsafe_extraction_path(extract_path)
        self.cached_files[target_path] = 1
        return target_path
    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.
        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = ("%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path)
            warnings.warn(msg, UserWarning)
    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`
        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.
        Resource providers should call this method ONLY after successfully
        extracting a compressed resource. They must NOT call it on resources
        that are already in the filesystem.
        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0x16D) & 0xFFF # 0555, 07777
            os.chmod(tempname, mode)
    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.
        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``. (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks. See that routine's documentation for more
        details.)
        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``. You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done. There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.
        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )
        self.extraction_path = path
    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process. This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX not implemented in this base class
def get_default_cache():
    """Determine the default cache location
    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory. On all other systems, it's "~/.python-eggs".
    Raises ``RuntimeError`` on Windows when no usable base directory can be
    derived from the environment.
    """
    # An explicit override always wins.
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name!='nt':
        return os.path.expanduser('~/.python-eggs')
    # Windows: probe candidate environment variables, most reliable first.
    app_data = 'Application Data' # XXX this may be locale-specific!
    app_homes = [
        (('APPDATA',), None), # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data), # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        # All keys of a candidate must be present for it to be usable.
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                break
        else:
            if subdir:
                dirname = os.path.join(dirname,subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # BUG FIX: the message previously misspelled "environment".
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    invalid_run = re.compile('[^A-Za-z0-9.]+')
    return invalid_run.sub('-', name)
def safe_version(version):
    """Convert an arbitrary string to a standard version string
    Spaces become dots, and all other non-alphanumeric characters become
    dashes, with runs of multiple dashes condensed to a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.sub('[^A-Za-z0-9.]+', '-', dotted)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name
    Runs of characters other than letters, digits, and '.' are each replaced
    with a single '_', and the result is lowercased.
    """
    collapsed = re.sub('[^A-Za-z0-9.]+', '_', extra)
    return collapsed.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form
    Any '-' characters are currently replaced with '_'.
    """
    # '-' separates name from version in egg filenames, so it must not
    # appear inside either component.
    return name.replace('-','_')
class MarkerEvaluation(object):
    """Evaluate PEP 426 environment-marker expressions.
    Walks the parse tree produced by the ``parser`` module, allowing only a
    small expression subset (comparisons, and/or, parentheses, plain strings,
    and the marker variables listed in ``values``).
    """
    # Callables producing the value of each marker variable at evaluation time.
    values = {
        'os_name': lambda: os.name,
        'sys_platform': lambda: sys.platform,
        'python_full_version': lambda: sys.version.split()[0],
        'python_version': lambda:'%s.%s' % (sys.version_info[0], sys.version_info[1]),
        'platform_version': platform.version,
        'platform_machine': platform.machine,
        'python_implementation': platform.python_implementation,
    }
    @classmethod
    def is_invalid_marker(cls, text):
        """
        Validate text as a PEP 426 environment marker; return an exception
        if invalid or False otherwise.
        """
        try:
            cls.evaluate_marker(text)
        except SyntaxError:
            return cls.normalize_exception(sys.exc_info()[1])
        return False
    @staticmethod
    def normalize_exception(exc):
        """
        Given a SyntaxError from a marker evaluation, normalize the error message:
        - Remove indications of filename and line number.
        - Replace platform-specific error messages with standard error messages.
        """
        subs = {
            'unexpected EOF while parsing': 'invalid syntax',
            'parenthesis is never closed': 'invalid syntax',
        }
        exc.filename = None
        exc.lineno = None
        exc.msg = subs.get(exc.msg, exc.msg)
        return exc
    @classmethod
    def and_test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        # Odd indices are the operands; even indices are 'and' tokens.
        return functools.reduce(operator.and_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def test(cls, nodelist):
        # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
        # Odd indices are the operands; even indices are 'or' tokens.
        return functools.reduce(operator.or_, [cls.interpret(nodelist[i]) for i in range(1,len(nodelist),2)])
    @classmethod
    def atom(cls, nodelist):
        # Only parenthesized sub-expressions are permitted as atoms.
        t = nodelist[1][0]
        if t == token.LPAR:
            if nodelist[2][0] == token.RPAR:
                raise SyntaxError("Empty parentheses")
            return cls.interpret(nodelist[2])
        raise SyntaxError("Language feature not supported in environment markers")
    @classmethod
    def comparison(cls, nodelist):
        if len(nodelist)>4:
            raise SyntaxError("Chained comparison not allowed in environment markers")
        comp = nodelist[2][1]
        cop = comp[1]
        if comp[0] == token.NAME:
            # Two-word operators ('not in' / 'is not') arrive as a pair of
            # NAME tokens; reconstruct the canonical spelling.
            if len(nodelist[2]) == 3:
                if cop == 'not':
                    cop = 'not in'
                else:
                    cop = 'is not'
        try:
            cop = cls.get_op(cop)
        except KeyError:
            raise SyntaxError(repr(cop)+" operator not allowed in environment markers")
        return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
    @classmethod
    def get_op(cls, op):
        # Dispatch table: grammar symbols -> tree handlers, operator
        # spellings -> binary comparison callables.
        ops = {
            symbol.test: cls.test,
            symbol.and_test: cls.and_test,
            symbol.atom: cls.atom,
            symbol.comparison: cls.comparison,
            'not in': lambda x, y: x not in y,
            'in': lambda x, y: x in y,
            '==': operator.eq,
            '!=': operator.ne,
        }
        # 'or_test' only exists as a separate symbol on some Python versions.
        if hasattr(symbol, 'or_test'):
            ops[symbol.or_test] = cls.test
        return ops[op]
    @classmethod
    def evaluate_marker(cls, text, extra=None):
        """
        Evaluate a PEP 426 environment marker on CPython 2.4+.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not implemented on
        Jython and has been superseded by the 'ast' module in Python 2.6 and
        later.
        """
        return cls.interpret(parser.expr(text).totuple(1)[1])
    @classmethod
    def _markerlib_evaluate(cls, text):
        """
        Evaluate a PEP 426 environment marker using markerlib.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
        """
        from pip._vendor import _markerlib
        # markerlib implements Metadata 1.2 (PEP 345) environment markers.
        # Translate the variables to Metadata 2.0 (PEP 426).
        env = _markerlib.default_environment()
        for key in env.keys():
            new_key = key.replace('.', '_')
            env[new_key] = env.pop(key)
        try:
            result = _markerlib.interpret(text, env)
        except NameError:
            e = sys.exc_info()[1]
            raise SyntaxError(e.args[0])
        return result
    if 'parser' not in globals():
        # Fall back to less-complete _markerlib implementation if 'parser' module
        # is not available.
        evaluate_marker = _markerlib_evaluate
    @classmethod
    def interpret(cls, nodelist):
        # Collapse single-child wrapper nodes down to the significant node.
        while len(nodelist)==2: nodelist = nodelist[1]
        try:
            op = cls.get_op(nodelist[0])
        except KeyError:
            raise SyntaxError("Comparison or logical expression expected")
        return op(nodelist)
    @classmethod
    def evaluate(cls, nodelist):
        # Collapse single-child wrapper nodes down to the significant node.
        while len(nodelist)==2: nodelist = nodelist[1]
        kind = nodelist[0]
        name = nodelist[1]
        if kind==token.NAME:
            try:
                op = cls.values[name]
            except KeyError:
                raise SyntaxError("Unknown name %r" % name)
            return op()
        if kind==token.STRING:
            s = nodelist[1]
            # Reject triple-quoted strings and escape sequences; only plain
            # single- or double-quoted literals are allowed.
            if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                    or '\\' in s:
                raise SyntaxError(
                    "Only plain strings allowed in environment markers")
            return s[1:-1]
        raise SyntaxError("Language feature not supported in environment markers")
# Module-level convenience aliases for the MarkerEvaluation classmethods.
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # Filled in by egg-aware subclasses; None means "not inside an egg".
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        # Map the '/'-separated resource name onto the module's directory.
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        # Binary stream over the raw resource bytes.
        return BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        # Falsy when there is no egg_info directory at all.
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    if sys.version_info <= (3,):
        def get_metadata(self, name):
            """Return metadata file contents, or "" if no egg_info."""
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name))
    else:
        def get_metadata(self, name):
            """Return metadata file contents decoded from UTF-8, or ""."""
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info, name)).decode("utf-8")

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        """Execute the metadata script 'scripts/<script_name>' in `namespace`."""
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # Normalize all newline conventions to '\n'.
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # A real file exists: execute from disk so tracebacks point at it.
            execfile(script_filename, namespace, namespace)
        else:
            # No real file (e.g. zipped egg): prime linecache so tracebacks
            # can still display the source lines.
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The primitives below depend on the loader type; subclasses registered
    # via register_loader_type override them.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # Join a '/'-separated resource name onto base using os separators.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )

# Catch-all registration: any unrecognized loader gets the NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # Walk up the directory tree until a '.egg' component is found,
        # or the path stops changing (root reached).
        while path != old:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Stream straight from the filesystem instead of buffering in memory.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        stream = open(path, 'rb')
        try:
            return stream.read()
        finally:
            stream.close()

# Modules without a __loader__ (plain filesystem imports) use DefaultProvider.
register_loader_type(type(None), DefaultProvider)

if importlib_bootstrap is not None:
    register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    _isdir = _has = lambda self, path: False
    _get = lambda self, path: ''
    _listdir = lambda self, path: []
    # No backing module, hence no module path.
    module_path = None

    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module.
        pass

# Shared singleton used as the default metadata provider (see Distribution).
empty_provider = EmptyProvider()
def build_zipmanifest(path):
    """Build a zipimport-style directory cache for the archive at *path*.

    The result is a dict similar to the caches kept by ``zipimport``, except
    that the values are ``ZipInfo`` objects rather than metadata tuples.
    Keys are archive member names with ``/`` translated to ``os.sep`` (on
    stock CPython ``zipimport`` stores ``os.sep``; on pypy/win32 the name is
    kept as-is, which is one reason distribute sometimes worked there).
    """
    manifest = {}
    archive = zipfile.ZipFile(path)
    # ZipFile has no __exit__ on Python 3.1, so close it manually.
    try:
        for member in archive.namelist():
            key = member.replace('/', os.sep)
            info = archive.getinfo(member)
            assert info is not None
            manifest[key] = info
    finally:
        archive.close()
    return manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    # Cached list of "eager" resource names; see _get_eager_resources.
    eagers = None

    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zipinfo = build_zipmanifest(self.loader.archive)
        # Every virtual path served by this provider starts with this prefix.
        self.zip_pre = self.loader.archive + os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre + zip_path  # pseudo-fs path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    def get_resource_filename(self, manager, resource_name):
        """Extract the resource to the extraction cache and return its path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # Eager resources (e.g. native libs) are all extracted together
            # before any one of them is used.
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        # Derive an mtime-style timestamp and a size from a ZipInfo record.
        size = zip_stat.file_size
        date_time = zip_stat.date_time + (0, 0, -1)  # ymdhms+wday, yday, dst
        #1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        """Extract one member (or a whole directory) and return its real path."""
        if zip_path in self._index():
            # Directory entry: extract each child, return the directory path.
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last)  # return the extracted directory name
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if self._is_current(real_path, zip_path):
                # Already extracted and up to date.
                return real_path
            # Write to a unique temp name, then rename into place.
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    elif os.name == 'nt':  # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error()  # report a user-friendly error
        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        f = open(file_path, 'rb')
        file_contents = f.read()
        f.close()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # Lazily read and cache names listed in the native_libs.txt and
        # eager_resources.txt metadata files.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # Lazily build and cache {directory subpath: [child names]} for
        # every directory implied by the archive's member paths.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        # ancestors of this parent are already indexed
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        # A name exists if it's a member or an implied directory.
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        # Eager resource names are relative to the egg root.
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))

register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Serve metadata from a single standalone PKG-INFO file.

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    Every data/metadata request is refused except ``PKG-INFO``, which
    reports as existing and yields the contents of the file at the
    location given to the constructor.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # Only the PKG-INFO name is recognized.
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        handle = open(self.path, 'rU')
        contents = handle.read()
        handle.close()
        return contents

    def get_metadata_lines(self, name):
        # Strip blanks and comment lines, like every other provider.
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:

        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)

        # Unpacked egg directories:

        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # path: directory containing the importable code
        # egg_info: directory containing the metadata files
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zipinfo = build_zipmanifest(importer.archive)
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        if importer.prefix:
            # zipimporter scoped to a subdirectory inside the archive
            self.module_path = os.path.join(importer.archive, importer.prefix)
        else:
            self.module_path = importer.archive
        self._setup_prefix()

# Registry mapping importer types to distribution-finding callables
# (populated via register_finder below).
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # A later registration for the same importer type replaces the earlier one.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    # Pick the finder registered for this importer's type (or a base type).
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return  # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if subitem.endswith('.egg'):
            # Recurse into eggs nested inside this zip ("basket" layout).
            subpath = os.path.join(path_item, subitem)
            for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
                yield dist

register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    # Default finder: unknown importer types never yield distributions.
    return ()

register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # standalone PKG-INFO-style metadata file
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item, entry, metadata, precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # .egg-link file: first non-blank line is the path of a
                    # development egg elsewhere on disk.
                    entry_file = open(os.path.join(path_item, entry))
                    try:
                        entry_lines = entry_file.readlines()
                    finally:
                        entry_file.close()
                    for line in entry_lines:
                        if not line.strip():
                            continue
                        for item in find_distributions(os.path.join(path_item, line.rstrip())):
                            yield item
                        # only the first non-blank line is used
                        break

register_finder(pkgutil.ImpImporter, find_on_path)

if importlib_bootstrap is not None:
    register_finder(importlib_bootstrap.FileFinder, find_on_path)
# Registry of namespace-package handlers by importer type, and the mapping
# of declared namespace packages (parent name or None -> child names).
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})

def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages

    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::

        def namespace_handler(importer,path_entry,moduleName,module):
            # return a path_entry to use for child packages

    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # this path item doesn't contain the package at all
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # First sighting: create an empty namespace-package module.
        module = sys.modules[packageName] = imp.new_module(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # Load with the new entry appended, then restore our (extended)
        # __path__ in case the loader replaced it.
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # already declared; nothing to do
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # Declare/import all parents first; the child's search path
            # comes from the parent package's __path__.
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        for pkg in _namespace_packages.get(parent, ()):
            child = _handle_ns(pkg, path_item)
            if child:
                # Recurse so nested namespace packages also pick up
                # the new path entry.
                fixup_namespace_packages(child, pkg)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for item in module.__path__:
        if _normalize_cached(item) == normalized:
            # already present; fall through returning None implicitly
            break
    else:
        # Only return the path if it's not already there
        return subpath

register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)

if importlib_bootstrap is not None:
    register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    # Default handler: unknown importer types contribute no subpath.
    return None

register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # Resolve symlinks first, then fold case per the platform's rules.
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    # Memoized normalize_path; the mutable default argument is the cache,
    # deliberately shared across all calls for the life of the process.
    try:
        return _cache[filename]
    except KeyError:
        _cache[filename] = result = normalize_path(filename)
        return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        for raw in strs.splitlines():
            line = raw.strip()
            # skip blank lines and '#' comment lines
            if line and not line.startswith('#'):
                yield line
    else:
        # a (possibly nested) sequence of strings: flatten recursively
        for item in strs:
            for line in yield_lines(item):
                yield line
# -- Requirement-string and egg-name parsing patterns (pre-bound .match) --
LINE_END = re.compile(r"\s*(#.*)?$").match    # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match    # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match    # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match    # ver. info
COMMA = re.compile(r"\s*,").match    # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match

# Egg filename structure: name[-version[-pyX.Y[-platform]]]
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            if part < '*final':  # remove '-' before a prerelease tag
                while parts and parts[-1] == '*final-':
                    parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == '00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""

    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Validate/normalize the extras by round-tripping them through a
        # dummy Requirement.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist

    def __str__(self):
        # Render back to the 'name = module:attrs [extras]' source form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s

    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)

    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object, optionally resolving
        the owning distribution's requirements first."""
        if require:
            self.require(env, installer)
        # fromlist forces __import__ to return the leaf module
        entry = __import__(self.module_name, globals(), globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry, attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry, attr))
        return entry

    def require(self, env=None, installer=None):
        # Extras can only be resolved relative to a known distribution.
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        list(map(working_set.add,
                 working_set.resolve(self.dist.requires(self.extras), env, installer)))

    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`

        Entry point syntax follows the form::

            name = some.module:some.attr [extra1,extra2]

        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name, value = src.split('=', 1)
            if '[' in value:
                value, extras = value.split('[', 1)
                # Reuse the Requirement parser to validate the extras list.
                req = Requirement.parse("x[" + extras)
                if req.specs:
                    raise ValueError
                extras = req.extras
            if ':' in value:
                value, attrs = value.split(':', 1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)

    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this

    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                # Lines before any [section] header are only legal if blank.
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""

    # Name of the metadata file; DistInfoDistribution overrides this.
    PKG_INFO = 'PKG-INFO'

    def __init__(self, location=None, metadata=None, project_name=None,
                 version=None, py_version=PY_MAJOR, platform=None,
                 precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # Provider used for all metadata access (see __getattr__ delegation).
        self._provider = metadata or empty_provider

    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        """Build a Distribution of the right subclass by parsing an
        egg-style basename (e.g. 'Foo-1.0-py2.7-win32.egg')."""
        project_name, version, py_version, platform = [None]*4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            # .dist-info gets much metadata differently
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
            cls = _distributionImpl[ext.lower()]
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )

    # Composite key used for hashing and all rich comparisons below.
    hashcmp = property(
        lambda self: (
            getattr(self, 'parsed_version', ()),
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version,
            self.platform
        )
    )

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not self == other

    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed.  (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)

    @property
    def key(self):
        # Lowercased project name, computed once and cached.
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property
    def parsed_version(self):
        # Sortable version tuple, computed once and cached.
        try:
            return self._parsed_version
        except AttributeError:
            self._parsed_version = pv = parse_version(self.version)
            return pv

    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Not given to the constructor: scan metadata for 'Version:'.
            for line in self._get_metadata(self.PKG_INFO):
                if line.lower().startswith('version:'):
                    self._version = safe_version(line.split(':', 1)[1].strip())
                    return self._version
            else:
                raise ValueError(
                    "Missing 'Version:' header and/or %s file" % self.PKG_INFO, self
                )

    @property
    def _dep_map(self):
        # Lazily parse requires.txt/depends.txt into
        # {extra_name_or_None: [Requirement, ...]}, cached on the instance.
        try:
            return self.__dep_map
        except AttributeError:
            dm = self.__dep_map = {None: []}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    if extra:
                        if ':' in extra:
                            # Section name carries an environment marker.
                            extra, marker = extra.split(':', 1)
                            if invalid_marker(marker):
                                reqs = []  # XXX warn
                            elif not evaluate_marker(marker):
                                # Marker evaluates false: drop these reqs.
                                reqs = []
                        extra = safe_extra(extra) or None
                    dm.setdefault(extra, []).extend(parse_requirements(reqs))
            return dm

    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps

    def _get_metadata(self, name):
        # Yield metadata lines, or nothing at all if the file is absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self, path=None):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path)
        if path is sys.path:
            # Also wire up any namespace packages this distribution declares.
            fixup_namespace_packages(self.location)
            list(map(declare_namespace, self._get_metadata('namespace_packages.txt')))

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            # version metadata missing/unreadable (see the version property)
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        return Requirement.parse('%s==%s' % (self.project_name, self.version))

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # Parse and cache entry_points.txt on first use.
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None):
        """Insert self.location in path before its nearest parent directory"""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # Normalized shadow of `path`, kept in sync with it below.
        npath = [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                # already on the path at position p
                break
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # Neither loc nor its parent dir found: append at the end.
            if path is sys.path:
                self.check_version_conflict()
            path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while 1:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np  # ha!
        return

    def check_version_conflict(self):
        # Warn when a module from this distribution's top_level.txt was
        # already imported from a different location.
        if self.key == 'setuptools':
            return  # ignore the inevitable setuptools self-conflicts :(
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                # Imported from within this distribution: no conflict.
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )

    def has_version(self):
        # True if a version can be determined; warns and returns False
        # for unbuilt eggs with no version metadata.
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        for attr in (
            'project_name', 'version', 'py_version', 'platform', 'location',
            'precedence'
        ):
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property
    def extras(self):
        # Names of all declared extras (the unconditional None key excluded).
        return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
    """Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""

    PKG_INFO = 'METADATA'
    # Matches a bare version between '('/',' and ','/')' so that '==' can
    # be prefixed (see _preparse_requirement).
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")

    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA files use email/RFC-822 style headers.
            from email.parser import Parser
            self._pkg_info = Parser().parsestr(self.get_metadata(self.PKG_INFO))
            return self._pkg_info

    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map

    def _preparse_requirement(self, requires_dist):
        """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')

        Split environment marker, add == prefix to version specifiers as
        necessary, and remove parenthesis.
        """
        parts = requires_dist.split(';', 1) + ['']
        distvers = parts[0].strip()
        mark = parts[1].strip()
        distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
        distvers = distvers.replace('(', '').replace(')', '')
        return (distvers, mark)

    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        from pip._vendor._markerlib import compile as compile_marker
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            distvers, mark = self._preparse_requirement(req)
            parsed = next(parse_requirements(distvers))
            # Attach the compiled environment-marker function to the req.
            parsed.marker_fn = compile_marker(mark)
            reqs.append(parsed)

        def reqs_for_extra(extra):
            # Requirements whose marker is satisfied when `extra` is active.
            for req in reqs:
                if req.marker_fn(override={'extra': extra}):
                    yield req

        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            extra = safe_extra(extra.strip())
            # Only the requirements *added* by this extra, beyond the common set.
            dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map metadata-directory suffix to the Distribution subclass that reads it.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': Distribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    from warnings import warn
    module_globals = globals()
    depth = 1
    try:
        # Skip every frame whose globals belong to this module.
        while sys._getframe(depth).f_globals is module_globals:
            depth += 1
    except ValueError:
        # Walked off the top of the stack; use whatever depth we reached.
        pass
    warn(stacklevel=depth + 1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`

    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))

    def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
        # Parse a comma-separated list of ITEM tokens starting at line[p:],
        # pulling in continuation lines until TERMINATOR matches.
        items = []
        while not TERMINATOR(line, p):
            if CONTINUE(line, p):
                try:
                    line = next(lines)
                    p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line, p)
            if not match:
                raise ValueError("Expected "+item_name+" in", line, "at", line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line, p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line, p):
                raise ValueError(
                    "Expected ',' or end-of-list in", line, "at", line[p:]
                )
        match = TERMINATOR(line, p)
        if match: p = match.end() # skip the terminator, if any
        return line, p, items

    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        # Optional "[extra1,extra2]" clause after the project name.
        match = OBRACKET(line, p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec")
        specs = [(op, safe_version(val)) for op, val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    # NOTE(review): Python 2 era code -- `basestring` below does not exist on
    # Python 3.

    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # Each index entry: (parsed version, state-machine row, operator, raw).
        index = [(parse_version(v), state_machine[op], op, v) for op, v in specs]
        index.sort()
        self.specs = [(op, ver) for parsed, trans, op, ver in index]
        self.index, self.extras = index, tuple(map(safe_extra, extras))
        # hashCmp drives __eq__ and __hash__: name key, parsed specs, extras.
        self.hashCmp = (
            self.key, tuple([(op, parsed) for parsed, trans, op, ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)

    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)

    def __eq__(self, other):
        return isinstance(other, Requirement) and self.hashCmp==other.hashCmp

    def __contains__(self, item):
        # `item` may be a Distribution or a version string.
        if isinstance(item, Distribution):
            if item.key != self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item, basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        # Drive the candidate version through each spec's transition row;
        # see the state_machine table for the row encoding.
        for parsed, trans, op, ver in self.index:
            action = trans[compare(item, parsed)] # Indexing: 0, 1, -1
            if action=='F':
                return False
            elif action=='T':
                return True
            elif action=='+':
                last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last

    def __hash__(self):
        return self.__hash

    def __repr__(self): return "Requirement.parse(%r)" % str(self)

    @staticmethod
    def parse(s):
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs)==1:
                return reqs[0]
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
# Transition table used by Requirement.__contains__. Each row holds three
# actions indexed by cmp(candidate, spec_version): position 0 = equal,
# 1 = greater, -1 = less. 'T' -> definitely contained, 'F' -> definitely not,
# '+'/'-' -> tentatively accept/reject, '.' -> no definite decision.
state_machine = {
    # =><
    '<': '--T',
    '<=': 'T-T',
    '>': 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`.

    The first hit along the object's mro wins; None when nothing matches.
    """
    for klass in _get_mro(getattr(ob, '__class__', type(ob))):
        if klass in registry:
            return registry[klass]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists.

    Creates any missing intermediate directories. A bare filename (no
    directory component) is now a no-op instead of raising OSError from
    ``os.makedirs('')``.
    """
    dirname = os.path.dirname(path)
    # dirname is '' for a bare filename; os.makedirs('') would raise.
    if dirname and not os.path.isdir(dirname):
        os.makedirs(dirname)
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs

    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                if section or content:
                    # Flush the previous section before starting a new one.
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)
    # wrap up last segment
    yield section, content
def _mkstemp(*args, **kw):
    """mkstemp() variant that bypasses a sandboxed ``os.open``, if active."""
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args, **kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()

def _initialize(g):
    # Re-export every public ResourceManager method at module level.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)

_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
_declare_state('object', working_set = WorkingSet())
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
                parse_requirements(__requires__), Environment()
                ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path

# Module-level conveniences bound to the master working set.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility

# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
list(map(working_set.add_entry,sys.path)) # match order
| gpl-2.0 |
firmlyjin/brython | www/tests/unittests/test/test_slice.py | 93 | 4433 | # tests for slice objects; in particular the indices method.
import unittest
from test import support
from pickle import loads, dumps
import sys
class SliceTest(unittest.TestCase):
    """Tests for slice objects; in particular the indices method."""

    def test_constructor(self):
        # slice() requires between one and three arguments.
        self.assertRaises(TypeError, slice)
        self.assertRaises(TypeError, slice, 1, 2, 3, 4)

    def test_repr(self):
        self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)")

    def test_hash(self):
        # Verify clearing of SF bug #800796
        self.assertRaises(TypeError, hash, slice(5))
        self.assertRaises(TypeError, slice(5).__hash__)

    def test_cmp(self):
        s1 = slice(1, 2, 3)
        s2 = slice(1, 2, 3)
        s3 = slice(1, 2, 4)
        self.assertEqual(s1, s2)
        self.assertNotEqual(s1, s3)
        self.assertNotEqual(s1, None)
        self.assertNotEqual(s1, (1, 2, 3))
        self.assertNotEqual(s1, "")

        class Exc(Exception):
            pass

        class BadCmp(object):
            # Equality always raises, to exercise error propagation.
            def __eq__(self, other):
                raise Exc

        s1 = slice(BadCmp())
        s2 = slice(BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1 == s2)

        s1 = slice(1, BadCmp())
        s2 = slice(1, BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1 == s2)

        s1 = slice(1, 2, BadCmp())
        s2 = slice(1, 2, BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1 == s2)

    def test_members(self):
        # Unsupplied start/step default to None.
        s = slice(1)
        self.assertEqual(s.start, None)
        self.assertEqual(s.stop, 1)
        self.assertEqual(s.step, None)

        s = slice(1, 2)
        self.assertEqual(s.start, 1)
        self.assertEqual(s.stop, 2)
        self.assertEqual(s.step, None)

        s = slice(1, 2, 3)
        self.assertEqual(s.start, 1)
        self.assertEqual(s.stop, 2)
        self.assertEqual(s.step, 3)

        class AnyClass:
            pass

        # Arbitrary objects are stored as-is.
        obj = AnyClass()
        s = slice(obj)
        self.assertTrue(s.stop is obj)

    def test_indices(self):
        self.assertEqual(slice(None ).indices(10), (0, 10, 1))
        self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2))
        self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2))
        self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1))
        self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2))
        self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2))
        # issue 3004 tests
        self.assertEqual(slice(None, -9).indices(10), (0, 1, 1))
        self.assertEqual(slice(None, -10).indices(10), (0, 0, 1))
        self.assertEqual(slice(None, -11).indices(10), (0, 0, 1))
        self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1))
        self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1))
        self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1))
        self.assertEqual(slice(None, 9).indices(10), (0, 9, 1))
        self.assertEqual(slice(None, 10).indices(10), (0, 10, 1))
        self.assertEqual(slice(None, 11).indices(10), (0, 10, 1))
        self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1))
        self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1))
        self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1))

        # Out-of-range bounds are clamped to the sequence length.
        self.assertEqual(
            slice(-100, 100 ).indices(10),
            slice(None).indices(10)
        )
        self.assertEqual(
            slice(100, -100, -1).indices(10),
            slice(None, None, -1).indices(10)
        )
        self.assertEqual(slice(-100, 100, 2).indices(10), (0, 10, 2))

        self.assertEqual(list(range(10))[::sys.maxsize - 1], [0])

        self.assertRaises(OverflowError, slice(None).indices, 1<<100)

    def test_setslice_without_getslice(self):
        tmp = []

        class X(object):
            def __setitem__(self, i, k):
                tmp.append((i, k))

        x = X()
        x[1:2] = 42
        self.assertEqual(tmp, [(slice(1, 2), 42)])

    def test_pickle(self):
        # Round-trip through every classic pickle protocol.
        s = slice(10, 20, 3)
        for protocol in (0, 1, 2):
            t = loads(dumps(s, protocol))
            self.assertEqual(s, t)
            self.assertEqual(s.indices(15), t.indices(15))
            self.assertNotEqual(id(s), id(t))
def test_main():
    # Entry point used by the regression-test driver.
    support.run_unittest(SliceTest)

if __name__ == "__main__":
    test_main()
| bsd-3-clause |
RickyZhong/SaltAdmin | view/monitor.py | 7 | 1408 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from main import *
# Monitoring center views.
# Redis
class M_Redis:
    """Redis monitoring page (web.py handler)."""
    def GET(self):
        if not getLogin():
            # Not signed in: remember the requested URL, then redirect
            # to the login page.
            web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
            return web.seeother("/login")
        session = getLogin()
        return render.monitor_redis(ShowName=session['ShowName'],
                                    uid=session['SID'])
# MySQL
class M_MySQL:
    """MySQL monitoring page (web.py handler)."""
    def GET(self):
        if not getLogin():
            # Not signed in: remember the requested URL, then redirect
            # to the login page.
            web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
            return web.seeother("/login")
        session = getLogin()
        return render.monitor_mysql(ShowName=session['ShowName'],
                                    uid=session['SID'])
# Network traffic
class M_Traffic:
    """Network bandwidth monitoring page (web.py handler)."""
    def GET(self):
        if not getLogin():
            # Not signed in: remember the requested URL, then redirect
            # to the login page.
            web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
            return web.seeother("/login")
        session = getLogin()
        return render.monitor_bandwidth(ShowName=session['ShowName'],
                                        uid=session['SID'])
| gpl-2.0 |
rhythmsosad/numpy | numpy/ma/tests/test_regression.py | 113 | 2470 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (assert_, TestCase, assert_array_equal,
assert_allclose, run_module_suite)
from numpy.compat import sixu
rlevel = 1
class TestRegression(TestCase):
    """Regression tests for historical numpy.ma (masked array) bugs."""

    def test_masked_array_create(self, level=rlevel):
        # Ticket #17
        x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
                               mask=[0, 0, 0, 1, 1, 1, 0, 0])
        assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])

    def test_masked_array(self, level=rlevel):
        # Ticket #61
        np.ma.array(1, mask=[1])

    def test_mem_masked_where(self, level=rlevel):
        # Ticket #62
        from numpy.ma import masked_where, MaskType
        a = np.zeros((1, 1))
        b = np.zeros(a.shape, MaskType)
        c = masked_where(b, a)
        a-c

    def test_masked_array_multiply(self, level=rlevel):
        # Ticket #254
        a = np.ma.zeros((4, 1))
        a[2, 0] = np.ma.masked
        b = np.zeros((4, 2))
        a*b
        b*a

    def test_masked_array_repeat(self, level=rlevel):
        # Ticket #271
        np.ma.array([1], mask=False).repeat(10)

    def test_masked_array_repr_unicode(self):
        # Ticket #1256
        repr(np.ma.array(sixu("Unicode")))

    def test_atleast_2d(self):
        # Ticket #1559
        a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
        b = np.atleast_2d(a)
        assert_(a.mask.ndim == 1)
        assert_(b.mask.ndim == 2)

    def test_set_fill_value_unicode_py3(self):
        # Ticket #2733
        a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
        a.fill_value = 'X'
        assert_(a.fill_value == 'X')

    def test_var_sets_maskedarray_scalar(self):
        # Issue gh-2757
        a = np.ma.array(np.arange(5), mask=True)
        mout = np.ma.array(-1, dtype=float)
        a.var(out=mout)
        assert_(mout._data == 0)

    def test_ddof_corrcoef(self):
        # See gh-3336
        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
        y = np.array([2, 2.5, 3.1, 3, 5])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            r0 = np.ma.corrcoef(x, y, ddof=0)
            r1 = np.ma.corrcoef(x, y, ddof=1)
            # ddof should not have an effect (it gets cancelled out)
            assert_allclose(r0.data, r1.data)
if __name__ == "__main__":
    # Allow running this test module directly.
    run_module_suite()
| bsd-3-clause |
NoctuaNivalis/qutebrowser | qutebrowser/misc/editor.py | 2 | 7874 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Launcher for an external editor."""
import os
import tempfile
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
from qutebrowser.config import config
from qutebrowser.utils import message, log
from qutebrowser.misc import guiprocess
class ExternalEditor(QObject):

    """Class to simplify editing a text in an external editor.

    Attributes:
        _text: The current text before the editor is opened.
        _filename: The name of the file to be edited.
        _remove_file: Whether the file should be removed when the editor is
                      closed.
        _proc: The GUIProcess of the editor.
    """

    # Emitted with the edited text once the editor exits successfully.
    editing_finished = pyqtSignal(str)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._filename = None
        self._proc = None
        self._remove_file = None

    def _cleanup(self):
        """Clean up temporary files after the editor closed."""
        assert self._remove_file is not None
        if self._filename is None or not self._remove_file:
            # Could not create initial file.
            return
        try:
            # Keep the file around after a crash so the edit isn't lost.
            if self._proc.exit_status() != QProcess.CrashExit:
                os.remove(self._filename)
        except OSError as e:
            # NOTE: Do not replace this with "raise CommandError" as it's
            # executed async.
            message.error("Failed to delete tempfile... ({})".format(e))

    @pyqtSlot(int, QProcess.ExitStatus)
    def on_proc_closed(self, exitcode, exitstatus):
        """Write the editor text into the form field and clean up tempfile.

        Callback for QProcess when the editor was closed.
        """
        log.procs.debug("Editor closed")
        if exitstatus != QProcess.NormalExit:
            # No error/cleanup here, since we already handle this in
            # on_proc_error.
            return
        try:
            if exitcode != 0:
                return
            encoding = config.val.editor.encoding
            try:
                with open(self._filename, 'r', encoding=encoding) as f:
                    text = f.read()
            except OSError as e:
                # NOTE: Do not replace this with "raise CommandError" as it's
                # executed async.
                message.error("Failed to read back edited file: {}".format(e))
                return
            log.procs.debug("Read back: {}".format(text))
            self.editing_finished.emit(text)
        finally:
            self._cleanup()

    @pyqtSlot(QProcess.ProcessError)
    def on_proc_error(self, _err):
        # The GUIProcess already reports the error; just remove the tempfile.
        self._cleanup()

    def edit(self, text, caret_position=None):
        """Edit a given text.

        Args:
            text: The initial text to edit.
            caret_position: The position of the caret in the text.
        """
        if self._filename is not None:
            raise ValueError("Already editing a file!")
        try:
            # Close while the external process is running, as otherwise systems
            # with exclusive write access (e.g. Windows) may fail to update
            # the file from the external editor, see
            # https://github.com/qutebrowser/qutebrowser/issues/1767
            with tempfile.NamedTemporaryFile(
                    mode='w', prefix='qutebrowser-editor-',
                    encoding=config.val.editor.encoding,
                    delete=False) as fobj:
                if text:
                    fobj.write(text)
                self._filename = fobj.name
        except OSError as e:
            message.error("Failed to create initial file: {}".format(e))
            return
        self._remove_file = True
        line, column = self._calc_line_and_column(text, caret_position)
        self._start_editor(line=line, column=column)

    def edit_file(self, filename):
        """Edit the file with the given filename."""
        self._filename = filename
        self._remove_file = False
        self._start_editor()

    def _start_editor(self, line=1, column=1):
        """Start the editor with the file opened as self._filename.

        Args:
            line: the line number to pass to the editor
            column: the column number to pass to the editor
        """
        self._proc = guiprocess.GUIProcess(what='editor', parent=self)
        self._proc.finished.connect(self.on_proc_closed)
        self._proc.error.connect(self.on_proc_error)
        editor = config.val.editor.command
        executable = editor[0]
        # Substitute {file}/{line}/{column} style placeholders in the args.
        args = [self._sub_placeholder(arg, line, column) for arg in editor[1:]]
        log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
        self._proc.start(executable, args)

    def _calc_line_and_column(self, text, caret_position):
        r"""Calculate line and column numbers given a text and caret position.

        Both line and column are 1-based indexes, because that's what most
        editors use as line and column starting index. By "most" we mean at
        least vim, nvim, gvim, emacs, atom, sublimetext, notepad++, brackets,
        visual studio, QtCreator and so on.

        To find the line we just count how many newlines there are before the
        caret and add 1.

        To find the column we calculate the difference between the caret and
        the last newline before the caret.

        For example in the text `aaa\nbb|bbb` (| represents the caret):
            caret_position = 6
            text[:caret_position] = `aaa\nbb`
            text[:caret_position].count('\n') = 1
            caret_position - text[:caret_position].rfind('\n') = 3

        Thus line, column = 2, 3, and the caret is indeed in the second
        line, third column

        Args:
            text: the text for which the numbers must be calculated
            caret_position: the position of the caret in the text, or None

        Return:
            A (line, column) tuple of (int, int)
        """
        if caret_position is None:
            return 1, 1
        line = text[:caret_position].count('\n') + 1
        column = caret_position - text[:caret_position].rfind('\n')
        return line, column

    def _sub_placeholder(self, arg, line, column):
        """Substitute a single placeholder.

        If the `arg` input to this function is a valid placeholder it will
        be substituted with the appropriate value, otherwise it will be left
        unchanged.

        Args:
            arg: an argument of editor.command.
            line: the previously-calculated line number for the text caret.
            column: the previously-calculated column number for the text caret.

        Return:
            The substituted placeholder or the original argument.
        """
        replacements = {
            '{}': self._filename,
            '{file}': self._filename,
            '{line}': str(line),
            '{line0}': str(line-1),
            '{column}': str(column),
            '{column0}': str(column-1)
        }
        for old, new in replacements.items():
            arg = arg.replace(old, new)
        return arg
| gpl-3.0 |
uppalk1/RackHD | test/tests/api/v1_1/discovery_tests.py | 11 | 3302 | from config.api1_1_config import *
from config.amqp import *
from modules.logger import Log
from on_http_api1_1 import NodesApi as Nodes
from on_http_api1_1.rest import ApiException
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_is_not_none
from proboscis import test
from json import loads
from time import sleep
from tests.api.v1_1.poller_tests import PollerTests
from tests.api.v1_1.workflows_tests import WorkflowsTests
from tests.api.v1_1.obm_settings import obmSettings
LOG = Log(__name__)
@test(groups=["discovery.tests"])
class DiscoveryTests(object):
    """Node discovery tests: reboot compute nodes into PXE and re-discover."""

    def __init__(self):
        self.__client = config.api_client
        self.__graph_name = None
        self.__task_worker = None
        self.__workflow_instance = WorkflowsTests()

    def __get_workflow_status(self, id):
        # Fetch the node's active workflow and reduce the response to its
        # '_status' field (only parsed when the GET returned HTTP 200).
        Nodes().nodes_identifier_workflows_active_get(id)
        status = self.__client.last_response.status
        if status == 200:
            data = loads(self.__client.last_response.data)
            status = data.get('_status')
        assert_is_not_none(status)
        return status

    @test(groups=['test_discovery_post_reboot'], depends_on_groups=["test-node-poller"])
    def test_node_workflows_post_reboot(self):
        """Testing reboot node POST:id/workflows"""
        # Two-task graph: force PXE boot order, then reboot the node.
        workflow = {
            "friendlyName": "set PXE and reboot node",
            "injectableName": "Graph.PXE.Reboot",
            "tasks": [
                {
                    "label": "set-boot-pxe",
                    "taskName": "Task.Obm.Node.PxeBoot",
                },
                {
                    "label": "reboot-start",
                    "taskName": "Task.Obm.Node.Reboot",
                    "waitOn": {
                        "set-boot-pxe": "succeeded"
                    }
                }
            ]
        }
        self.__workflow_instance.put_workflow(workflow)
        self.__workflow_instance.post_workflows("Graph.PXE.Reboot")

    @test(groups=['test_discovery_delete_node'],
          depends_on_groups=["test_discovery_post_reboot", "test-bm-discovery-prepare"])
    def test_node_delete_all(self):
        """ Testing DELETE all compute nodes """
        codes = []
        Nodes().nodes_get()
        nodes = loads(self.__client.last_response.data)
        for n in nodes:
            if n.get('type') == 'compute':
                uuid = n.get('id')
                try:
                    # Cancel any active workflow before deleting the node.
                    Nodes().nodes_identifier_workflows_active_delete(uuid)
                except ApiException as e:
                    assert_equal(404, e.status, message = 'status should be 404')
                except (TypeError, ValueError) as e:
                    assert(e.message)
                Nodes().nodes_identifier_delete(uuid)
                codes.append(self.__client.last_response)
        assert_not_equal(0, len(codes), message='Delete node list empty!')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)

    @test(groups=['test_discovery_add_obm'],
          depends_on_groups=["test_discovery_delete_node", "test-bm-discovery"])
    def test_node_add_obm(self):
        # All nodes should accept IPMI OBM settings without failures.
        assert_equal(len(obmSettings().setup_nodes(service_type='ipmi-obm-service')), 0)
| apache-2.0 |
tempbottle/rethinkdb | external/v8_3.30.33.16/tools/testrunner/local/utils.py | 40 | 3816 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from os.path import exists
from os.path import isdir
from os.path import join
import platform
import re
import urllib2
def GetSuitePaths(test_root):
    """Return the names of the sub-directories of test_root (the test suites)."""
    entries = os.listdir(test_root)
    return [entry for entry in entries if isdir(join(test_root, entry))]
# Reads a file into an array of strings
def ReadLinesFrom(name):
lines = []
with open(name) as f:
for line in f:
if line.startswith('#'): continue
if '#' in line:
line = line[:line.find('#')]
line = line.strip()
if not line: continue
lines.append(line)
return lines
def GuessOS():
    """Map platform.system() onto the OS names the test configs use."""
    system = platform.system()
    known = {
        'Linux': 'linux',
        'Darwin': 'macos',
        # On Windows Vista platform.system() can return 'Microsoft' with some
        # versions of Python, see http://bugs.python.org/issue1082
        'Windows': 'windows',
        'Microsoft': 'windows',
        'FreeBSD': 'freebsd',
        'OpenBSD': 'openbsd',
        'SunOS': 'solaris',
        'NetBSD': 'netbsd',
    }
    if system in known:
        return known[system]
    if system.find('CYGWIN') >= 0:
        return 'cygwin'
    return None
def UseSimulator(arch):
    """True when tests for `arch` must run in a simulator on this machine."""
    machine = platform.machine()
    needs_simulation = arch in ("mipsel", "arm", "arm64")
    return bool(machine) and needs_simulation and not arch.startswith(machine)
# This will default to building the 32 bit VM even on machines that are
# capable of running the 64 bit VM.
def DefaultArch():
machine = platform.machine()
machine = machine.lower() # Windows 7 capitalizes 'AMD64'.
if machine.startswith('arm'):
return 'arm'
elif (not machine) or (not re.match('(x|i[3-6])86$', machine) is None):
return 'ia32'
elif machine == 'i86pc':
return 'ia32'
elif machine == 'x86_64':
return 'ia32'
elif machine == 'amd64':
return 'ia32'
else:
return None
def GuessWordsize():
    """Guess the host word size from the machine name."""
    return '64' if '64' in platform.machine() else '32'
def IsWindows():
    # Convenience wrapper around GuessOS().
    return GuessOS() == 'windows'
def URLRetrieve(source, destination):
    """urllib is broken for SSL connections via a proxy therefore we
    can't use urllib.urlretrieve()."""
    # NOTE(review): Python 2 only (urllib2); the response body is written in
    # text mode.
    with open(destination, 'w') as f:
        f.write(urllib2.urlopen(source).read())
| agpl-3.0 |
mrabbah/snmpccgx | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """Probes byte streams for the GB2312 (simplified Chinese) encoding."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # The coding state machine validates GB2312 byte sequences; the
        # distribution analyser scores character-frequency plausibility.
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "GB2312"
| gpl-3.0 |
vikomall/pyrax | samples/images/list_tasks.py | 13 | 1270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import pyrax
# Authenticate against Rackspace using the standard credentials file.
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
imgs = pyrax.images

print("This will loop through all current tasks.")
tasks = imgs.list_tasks()
for task in tasks:
    # Print one blank-line-separated summary per task.
    print()
    print("Task ID=%s" % task.id)
    print(" Type: %s" % task.type)
    print(" Status: %s" % task.status)
    print(" Message: %s" % task.message)
    print(" Created: %s" % task.created_at)
    print(" Expires: %s" % task.expires_at)
| apache-2.0 |
rohit21122012/DCASE2013 | runs/2013/xgboost10/src/features.py | 33 | 9663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import librosa
import scipy
from scikits.talkbox import lpc
from scikits.talkbox.tools import segment_axis
from scipy.fftpack import fft,dct
def feature_extraction_lp_group_delay(y, fs=44100, statistics=True, lpgd_params=None, win_params=None):
    # Linear-prediction group-delay features: frame the signal, fit an LP
    # model per frame, take the negative phase difference of the all-pole
    # spectrum, and compress each frame with a 20-point DCT.
    # NOTE(review): Python 2 code (print statements, integer nfft/2 division).
    eps = numpy.spacing(1)
    nfft = lpgd_params['nfft']
    lp_order = lpgd_params['lp_order']
    y = y + eps  # avoid exact zeros before LP analysis
    frames = segment_axis(y, win_params['win_length'], win_params['hop_length']);
    print 'frames : ' + str(frames.shape)
    a,e,k = lpc(frames, lp_order)
    print 'a : ' + str(a.shape)
    A = fft(a, nfft)
    A = 1/A  # all-pole (LP) spectrum from the prediction coefficients
    phaseA = numpy.unwrap(numpy.angle(A))
    print 'phaseA: ' + str(phaseA.shape)
    phaseA = phaseA[:,0:nfft/2]  # keep the positive-frequency half
    print 'phaseA: ' + str(phaseA.shape)
    tauA = -1 * numpy.diff(phaseA)  # group delay = -d(phase)/d(omega)
    print 'tauA' + str(tauA.shape)
    # tau = numpy.concatenate((tauA, tauA[-1]))
    # tau = tau
    feature_matrix = tauA
    feature_matrix = dct(feature_matrix, n=20)
    print 'fm: ' + str(feature_matrix.shape)
    # Collect into data structure
    if statistics:
        return {
            'feat': feature_matrix,
            'stat': {
                'mean': numpy.mean(feature_matrix, axis=0),
                'std': numpy.std(feature_matrix, axis=0),
                'N': feature_matrix.shape[0],
                'S1': numpy.sum(feature_matrix, axis=0),
                'S2': numpy.sum(feature_matrix ** 2, axis=0),
            }
        }
    else:
        return {
            'feat': feature_matrix}
def feature_extraction(y, fs=44100, statistics=True, include_mfcc0=True, include_delta=True,
                       include_acceleration=True, mfcc_params=None, delta_params=None, acceleration_params=None):
    """Feature extraction, MFCC based features
    Outputs features in dict, format:
    {
        'feat': feature_matrix [shape=(frame count, feature vector size)],
        'stat': {
            'mean': numpy.mean(feature_matrix, axis=0),
            'std': numpy.std(feature_matrix, axis=0),
            'N': feature_matrix.shape[0],
            'S1': numpy.sum(feature_matrix, axis=0),
            'S2': numpy.sum(feature_matrix ** 2, axis=0),
        }
    }
    Parameters
    ----------
    y: numpy.array [shape=(signal_length, )]
        Audio
    fs: int > 0 [scalar]
        Sample rate
        (Default value=44100)
    statistics: bool
        Calculate feature statistics for extracted matrix
        (Default value=True)
    include_mfcc0: bool
        Include 0th MFCC coefficient into static coefficients.
        (Default value=True)
    include_delta: bool
        Include delta MFCC coefficients.
        (Default value=True)
    include_acceleration: bool
        Include acceleration MFCC coefficients.
        (Default value=True)
    mfcc_params: dict or None
        Parameters for extraction of static MFCC coefficients.
    delta_params: dict or None
        Parameters for extraction of delta MFCC coefficients.
    acceleration_params: dict or None
        Parameters for extraction of acceleration MFCC coefficients.
    Returns
    -------
    result: dict
        Feature dict
    """
    eps = numpy.spacing(1)
    # Windowing function
    if mfcc_params['window'] == 'hamming_asymmetric':
        window = scipy.signal.hamming(mfcc_params['n_fft'], sym=False)
    elif mfcc_params['window'] == 'hamming_symmetric':
        window = scipy.signal.hamming(mfcc_params['n_fft'], sym=True)
    elif mfcc_params['window'] == 'hann_asymmetric':
        window = scipy.signal.hann(mfcc_params['n_fft'], sym=False)
    elif mfcc_params['window'] == 'hann_symmetric':
        window = scipy.signal.hann(mfcc_params['n_fft'], sym=True)
    else:
        # Unknown name: let librosa fall back to its default window.
        window = None
    # Debug output (Python 2 print statements).
    print 'y: ' + str(y.shape)
    print 'winlength: '+ str(mfcc_params['win_length'])
    # Calculate Static Coefficients
    # Power spectrogram; eps offset avoids log(0) downstream.
    magnitude_spectrogram = numpy.abs(librosa.stft(y + eps,
                                                   n_fft=mfcc_params['n_fft'],
                                                   win_length=mfcc_params['win_length'],
                                                   hop_length=mfcc_params['hop_length'],
                                                   center=True,
                                                   window=window))**2
    print 'mag_spec: ' + str(magnitude_spectrogram.shape)
    mel_basis = librosa.filters.mel(sr=fs,
                                    n_fft=mfcc_params['n_fft'],
                                    n_mels=mfcc_params['n_mels'],
                                    fmin=mfcc_params['fmin'],
                                    fmax=mfcc_params['fmax'],
                                    htk=mfcc_params['htk'])
    mel_spectrum = numpy.dot(mel_basis, magnitude_spectrogram)
    # NOTE(review): n_mfcc is left at librosa's default here rather than
    # taken from mfcc_params — confirm this is intended.
    mfcc = librosa.feature.mfcc(S=librosa.logamplitude(mel_spectrum))
    print 'mfcc dimensions: ' + str(mfcc.shape)
    # Collect the feature matrix
    feature_matrix = mfcc
    if include_delta:
        # Delta coefficients
        mfcc_delta = librosa.feature.delta(mfcc, **delta_params)
        # Add Delta Coefficients to feature matrix
        feature_matrix = numpy.vstack((feature_matrix, mfcc_delta))
    if include_acceleration:
        # Acceleration coefficients (aka delta-delta)
        mfcc_delta2 = librosa.feature.delta(mfcc, order=2, **acceleration_params)
        # Add Acceleration Coefficients to feature matrix
        feature_matrix = numpy.vstack((feature_matrix, mfcc_delta2))
    if not include_mfcc0:
        # Omit mfcc0 (overall energy term)
        feature_matrix = feature_matrix[1:, :]
    # Transpose so rows are frames and columns are feature dimensions.
    feature_matrix = feature_matrix.T
    # Collect into data structure
    if statistics:
        return {
            'feat': feature_matrix,
            'stat': {
                'mean': numpy.mean(feature_matrix, axis=0),
                'std': numpy.std(feature_matrix, axis=0),
                'N': feature_matrix.shape[0],
                'S1': numpy.sum(feature_matrix, axis=0),
                'S2': numpy.sum(feature_matrix ** 2, axis=0),
            }
        }
    else:
        return {
            'feat': feature_matrix}
class FeatureNormalizer(object):
    """Accumulate feature statistics and z-score normalize feature matrices.

    Statistics can be seeded from a single matrix via the constructor,
    gathered incrementally with :meth:`accumulate`, or collected inside a
    ``with`` block: the context manager zeroes the accumulators on entry
    and finalizes them on exit.  After :meth:`finalize`, ``self.mean`` and
    ``self.std`` hold row vectors of shape (1, feature count) and
    :meth:`normalize` applies ``(x - mean) / std``.
    """
    def __init__(self, feature_matrix=None):
        """Initialize the normalizer.

        Parameters
        ----------
        feature_matrix : numpy.ndarray [shape=(frames, feature count)] or None
            Optional matrix whose statistics seed (and immediately
            finalize) the normalizer.  With None, all accumulators start
            at zero and must be filled via :meth:`accumulate`.
        """
        if feature_matrix is not None:
            self.N = feature_matrix.shape[0]
            self.mean = numpy.mean(feature_matrix, axis=0)
            self.std = numpy.std(feature_matrix, axis=0)
            self.S1 = numpy.sum(feature_matrix, axis=0)
            self.S2 = numpy.sum(feature_matrix ** 2, axis=0)
            self.finalize()
        else:
            self._reset()
    def _reset(self):
        # Zero every accumulator so a fresh accumulation run can begin.
        self.N = 0
        self.mean = 0
        self.S1 = 0
        self.S2 = 0
        self.std = 0
    def __enter__(self):
        # Start a clean accumulation pass.
        self._reset()
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Turn the accumulated sums into usable mean/std.
        self.finalize()
    def accumulate(self, stat):
        """Fold one statistics dict into the running accumulators.

        Parameters
        ----------
        stat : dict
            Must contain 'N', 'mean', 'S1' and 'S2' as produced by the
            feature extraction functions.  ('std' entries are ignored.)

        Returns
        -------
        nothing
        """
        self.N += stat['N']
        self.mean += stat['mean']
        self.S1 += stat['S1']
        self.S2 += stat['S2']
    def finalize(self):
        """Convert accumulated sums into mean and (sample) std vectors.

        Uses the two-pass-free identity
        ``var = (N * S2 - S1**2) / (N * (N - 1))`` and maps any NaN from
        degenerate (constant) dimensions to 0.  Both results are reshaped
        to row vectors so they broadcast over (frames, features) matrices.
        """
        self.mean = numpy.reshape(self.S1 / self.N, [1, -1])
        variance = (self.N * self.S2 - (self.S1 * self.S1)) / (self.N * (self.N - 1))
        self.std = numpy.reshape(numpy.nan_to_num(numpy.sqrt(variance)), [1, -1])
    def normalize(self, feature_matrix):
        """Return *feature_matrix* normalized by the stored mean and std.

        Parameters
        ----------
        feature_matrix : numpy.ndarray [shape=(frames, feature count)]
            Feature matrix to be normalized.

        Returns
        -------
        numpy.ndarray [shape=(frames, feature count)]
            Normalized feature matrix.
        """
        return (feature_matrix - self.mean) / self.std
| mit |
diagramsoftware/odoo | addons/l10n_th/__openerp__.py | 260 | 1453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: evaluated as a Python dict by the server.
{
    'name': 'Thailand - Accounting',
    'version': '1.0',
    'category': 'Localization/Account Charts',
    'description': """
Chart of Accounts for Thailand.
===============================
Thai accounting chart and localization.
""",
    'author': 'Almacom',
    'website': 'http://almacom.co.th/',
    # Requires the generic chart-of-accounts framework module.
    'depends': ['account_chart'],
    # XML data files loaded on module installation.
    'data': [ 'account_data.xml' ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aequitas/home-assistant | tests/components/history/test_init.py | 12 | 20961 | """The tests the History component."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
import unittest
from unittest.mock import patch, sentinel
from homeassistant.setup import setup_component, async_setup_component
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from homeassistant.components import history, recorder
from tests.common import (
init_recorder_component, mock_state_change_event, get_test_home_assistant)
class TestComponentHistory(unittest.TestCase):
    """Test History component."""
    # Each test runs against a real (test) Home Assistant instance with an
    # in-memory recorder; the helpers below manage its lifecycle.
    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def init_recorder(self):
        """Initialize the recorder."""
        init_recorder_component(self.hass)
        self.hass.start()
        self.wait_recording_done()
    def wait_recording_done(self):
        """Block till recording is done."""
        self.hass.block_till_done()
        self.hass.data[recorder.DATA_INSTANCE].block_till_done()
    def test_setup(self):
        """Test setup method of history."""
        config = history.CONFIG_SCHEMA({
            # ha.DOMAIN: {},
            history.DOMAIN: {
                history.CONF_INCLUDE: {
                    history.CONF_DOMAINS: ['media_player'],
                    history.CONF_ENTITIES: ['thermostat.test']},
                history.CONF_EXCLUDE: {
                    history.CONF_DOMAINS: ['thermostat'],
                    history.CONF_ENTITIES: ['media_player.test']}}})
        self.init_recorder()
        assert setup_component(self.hass, history.DOMAIN, config)
    def test_get_states(self):
        """Test getting states at a specific point in time."""
        self.init_recorder()
        states = []
        now = dt_util.utcnow()
        # Patch the recorder's clock so recorded timestamps are deterministic.
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=now):
            for i in range(5):
                state = ha.State(
                    'test.point_in_time_{}'.format(i % 5),
                    "State {}".format(i),
                    {'attribute_test': i})
                mock_state_change_event(self.hass, state)
                states.append(state)
            self.wait_recording_done()
        future = now + timedelta(seconds=1)
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=future):
            for i in range(5):
                state = ha.State(
                    'test.point_in_time_{}'.format(i % 5),
                    "State {}".format(i),
                    {'attribute_test': i})
                mock_state_change_event(self.hass, state)
            self.wait_recording_done()
        # Get states returns everything before POINT
        for state1, state2 in zip(
                states, sorted(history.get_states(self.hass, future),
                               key=lambda state: state.entity_id)):
            assert state1 == state2
        # Test get_state here because we have a DB setup
        assert states[0] == \
            history.get_state(self.hass, future, states[0].entity_id)
    def test_state_changes_during_period(self):
        """Test state change during period."""
        self.init_recorder()
        entity_id = 'media_player.test'
        def set_state(state):
            """Set the state."""
            self.hass.states.set(entity_id, state)
            self.wait_recording_done()
            return self.hass.states.get(entity_id)
        start = dt_util.utcnow()
        point = start + timedelta(seconds=1)
        end = point + timedelta(seconds=1)
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=start):
            set_state('idle')
            set_state('YouTube')
        # Only the states recorded at `point` fall inside [start, end).
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=point):
            states = [
                set_state('idle'),
                set_state('Netflix'),
                set_state('Plex'),
                set_state('YouTube'),
            ]
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=end):
            set_state('Netflix')
            set_state('Plex')
        hist = history.state_changes_during_period(
            self.hass, start, end, entity_id)
        assert states == hist[entity_id]
    def test_get_last_state_changes(self):
        """Test number of state changes."""
        self.init_recorder()
        entity_id = 'sensor.test'
        def set_state(state):
            """Set the state."""
            self.hass.states.set(entity_id, state)
            self.wait_recording_done()
            return self.hass.states.get(entity_id)
        start = dt_util.utcnow() - timedelta(minutes=2)
        point = start + timedelta(minutes=1)
        point2 = point + timedelta(minutes=1)
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=start):
            set_state('1')
        states = []
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=point):
            states.append(set_state('2'))
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=point2):
            states.append(set_state('3'))
        # Ask for the last 2 changes; the oldest ('1') must be dropped.
        hist = history.get_last_state_changes(
            self.hass, 2, entity_id)
        assert states == hist[entity_id]
    def test_get_significant_states(self):
        """Test that only significant states are returned.
        We should get back every thermostat change that
        includes an attribute change, but only the state updates for
        media player (attribute changes are not significant and not returned).
        """
        zero, four, states = self.record_states()
        hist = history.get_significant_states(
            self.hass, zero, four, filters=history.Filters())
        assert states == hist
    def test_get_significant_states_with_initial(self):
        """Test that only significant states are returned.
        We should get back every thermostat change that
        includes an attribute change, but only the state updates for
        media player (attribute changes are not significant and not returned).
        """
        zero, four, states = self.record_states()
        one = zero + timedelta(seconds=1)
        one_and_half = zero + timedelta(seconds=1.5)
        for entity_id in states:
            if entity_id == 'media_player.test':
                states[entity_id] = states[entity_id][1:]
            # States from before the query window are reported at its start.
            for state in states[entity_id]:
                if state.last_changed == one:
                    state.last_changed = one_and_half
        hist = history.get_significant_states(
            self.hass, one_and_half, four, filters=history.Filters(),
            include_start_time_state=True)
        assert states == hist
    def test_get_significant_states_without_initial(self):
        """Test that only significant states are returned.
        We should get back every thermostat change that
        includes an attribute change, but only the state updates for
        media player (attribute changes are not significant and not returned).
        """
        zero, four, states = self.record_states()
        one = zero + timedelta(seconds=1)
        one_and_half = zero + timedelta(seconds=1.5)
        for entity_id in states:
            states[entity_id] = list(filter(
                lambda s: s.last_changed != one, states[entity_id]))
        del states['media_player.test2']
        hist = history.get_significant_states(
            self.hass, one_and_half, four, filters=history.Filters(),
            include_start_time_state=False)
        assert states == hist
    def test_get_significant_states_entity_id(self):
        """Test that only significant states are returned for one entity."""
        zero, four, states = self.record_states()
        del states['media_player.test2']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        hist = history.get_significant_states(
            self.hass, zero, four, ['media_player.test'],
            filters=history.Filters())
        assert states == hist
    def test_get_significant_states_multiple_entity_ids(self):
        """Test that only significant states are returned for one entity."""
        zero, four, states = self.record_states()
        del states['media_player.test2']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        hist = history.get_significant_states(
            self.hass, zero, four, ['media_player.test', 'thermostat.test'],
            filters=history.Filters())
        assert states == hist
    def test_get_significant_states_exclude_domain(self):
        """Test if significant states are returned when excluding domains.
        We should get back every thermostat change that includes an attribute
        change, but no media player changes.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['media_player.test2']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_EXCLUDE: {
                history.CONF_DOMAINS: ['media_player', ]}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_exclude_entity(self):
        """Test if significant states are returned when excluding entities.
        We should get back every thermostat and script changes, but no media
        player changes.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_EXCLUDE: {
                history.CONF_ENTITIES: ['media_player.test', ]}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_exclude(self):
        """Test significant states when excluding entities and domains.
        We should not get back every thermostat and media player test changes.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['thermostat.test']
        del states['thermostat.test2']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_EXCLUDE: {
                history.CONF_DOMAINS: ['thermostat', ],
                history.CONF_ENTITIES: ['media_player.test', ]}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_exclude_include_entity(self):
        """Test significant states when excluding domains and include entities.
        We should not get back every thermostat and media player test changes.
        """
        zero, four, states = self.record_states()
        del states['media_player.test2']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {
                history.CONF_INCLUDE: {
                    history.CONF_ENTITIES: ['media_player.test',
                                            'thermostat.test']},
                history.CONF_EXCLUDE: {
                    history.CONF_DOMAINS: ['thermostat']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include_domain(self):
        """Test if significant states are returned when including domains.
        We should get back every thermostat and script changes, but no media
        player changes.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['media_player.test2']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_INCLUDE: {
                history.CONF_DOMAINS: ['thermostat', 'script']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include_entity(self):
        """Test if significant states are returned when including entities.
        We should only get back changes of the media_player.test entity.
        """
        zero, four, states = self.record_states()
        del states['media_player.test2']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_INCLUDE: {
                history.CONF_ENTITIES: ['media_player.test']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include(self):
        """Test significant states when including domains and entities.
        We should only get back changes of the media_player.test entity and the
        thermostat domain.
        """
        zero, four, states = self.record_states()
        del states['media_player.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {history.CONF_INCLUDE: {
                history.CONF_DOMAINS: ['thermostat'],
                history.CONF_ENTITIES: ['media_player.test']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include_exclude_domain(self):
        """Test if significant states when excluding and including domains.
        We should not get back any changes since we include only the
        media_player domain but also exclude it.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['media_player.test2']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {
                history.CONF_INCLUDE: {
                    history.CONF_DOMAINS: ['media_player']},
                history.CONF_EXCLUDE: {
                    history.CONF_DOMAINS: ['media_player']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include_exclude_entity(self):
        """Test if significant states when excluding and including domains.
        We should not get back any changes since we include only
        media_player.test but also exclude it.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['media_player.test2']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {
                history.CONF_INCLUDE: {
                    history.CONF_ENTITIES: ['media_player.test']},
                history.CONF_EXCLUDE: {
                    history.CONF_ENTITIES: ['media_player.test']}}})
        self.check_significant_states(zero, four, states, config)
    def test_get_significant_states_include_exclude(self):
        """Test if significant states when in/excluding domains and entities.
        We should only get back changes of the media_player.test2 entity.
        """
        zero, four, states = self.record_states()
        del states['media_player.test']
        del states['thermostat.test']
        del states['thermostat.test2']
        del states['script.can_cancel_this_one']
        config = history.CONFIG_SCHEMA({
            ha.DOMAIN: {},
            history.DOMAIN: {
                history.CONF_INCLUDE: {
                    history.CONF_DOMAINS: ['media_player'],
                    history.CONF_ENTITIES: ['thermostat.test']},
                history.CONF_EXCLUDE: {
                    history.CONF_DOMAINS: ['thermostat'],
                    history.CONF_ENTITIES: ['media_player.test']}}})
        self.check_significant_states(zero, four, states, config)
    def check_significant_states(self, zero, four, states, config):
        """Check if significant states are retrieved."""
        # Build a Filters object mirroring the include/exclude config, then
        # compare the filtered query result to the expected `states` dict.
        filters = history.Filters()
        exclude = config[history.DOMAIN].get(history.CONF_EXCLUDE)
        if exclude:
            filters.excluded_entities = exclude.get(history.CONF_ENTITIES, [])
            filters.excluded_domains = exclude.get(history.CONF_DOMAINS, [])
        include = config[history.DOMAIN].get(history.CONF_INCLUDE)
        if include:
            filters.included_entities = include.get(history.CONF_ENTITIES, [])
            filters.included_domains = include.get(history.CONF_DOMAINS, [])
        hist = history.get_significant_states(
            self.hass, zero, four, filters=filters)
        assert states == hist
    def record_states(self):
        """Record some test states.
        We inject a bunch of state updates from media player, zone and
        thermostat.
        """
        self.init_recorder()
        mp = 'media_player.test'
        mp2 = 'media_player.test2'
        therm = 'thermostat.test'
        therm2 = 'thermostat.test2'
        zone = 'zone.home'
        script_nc = 'script.cannot_cancel_this_one'
        script_c = 'script.can_cancel_this_one'
        def set_state(entity_id, state, **kwargs):
            """Set the state."""
            self.hass.states.set(entity_id, state, **kwargs)
            self.wait_recording_done()
            return self.hass.states.get(entity_id)
        zero = dt_util.utcnow()
        one = zero + timedelta(seconds=1)
        two = one + timedelta(seconds=1)
        three = two + timedelta(seconds=1)
        four = three + timedelta(seconds=1)
        # Expected "significant" states per entity, filled in below.
        states = {therm: [], therm2: [], mp: [], mp2: [], script_c: []}
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=one):
            states[mp].append(
                set_state(mp, 'idle',
                          attributes={'media_title': str(sentinel.mt1)}))
            states[mp].append(
                set_state(mp, 'YouTube',
                          attributes={'media_title': str(sentinel.mt2)}))
            states[mp2].append(
                set_state(mp2, 'YouTube',
                          attributes={'media_title': str(sentinel.mt2)}))
            states[therm].append(
                set_state(therm, 20, attributes={'current_temperature': 19.5}))
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=two):
            # This state will be skipped only different in time
            set_state(mp, 'YouTube',
                      attributes={'media_title': str(sentinel.mt3)})
            # This state will be skipped because domain blacklisted
            set_state(zone, 'zoning')
            set_state(script_nc, 'off')
            states[script_c].append(
                set_state(script_c, 'off', attributes={'can_cancel': True}))
            states[therm].append(
                set_state(therm, 21, attributes={'current_temperature': 19.8}))
            states[therm2].append(
                set_state(therm2, 20, attributes={'current_temperature': 19}))
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=three):
            states[mp].append(
                set_state(mp, 'Netflix',
                          attributes={'media_title': str(sentinel.mt4)}))
            # Attributes changed even though state is the same
            states[therm].append(
                set_state(therm, 21, attributes={'current_temperature': 20}))
            # state will be skipped since entity is hidden
            set_state(therm, 22, attributes={'current_temperature': 21,
                                             'hidden': True})
        return zero, four, states
async def test_fetch_period_api(hass, hass_client):
    """Test the fetch period view for history."""
    # Spin up recorder + history, wait for the recorder to settle, then hit
    # the REST endpoint and expect a successful (200) response.
    await hass.async_add_job(init_recorder_component, hass)
    await async_setup_component(hass, 'history', {})
    await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
    client = await hass_client()
    response = await client.get(
        '/api/history/period/{}'.format(dt_util.utcnow().isoformat()))
    assert response.status == 200
| apache-2.0 |
mantidproject/mantid | Framework/PythonInterface/test/python/mantid/kernel/CompositeValidatorTest.py | 3 | 2871 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.kernel import CompositeValidator, CompositeRelation, FloatBoundedValidator
from mantid.api import PythonAlgorithm
class CompositeValidatorTest(unittest.TestCase):
    """Tests for CompositeValidator with AND (default) and OR relations."""

    def test_creation_with_add_succeeds_correctly_in_algorithm(self):
        """
        Tests that a composite validator created with the add
        method validates correctly
        """
        validation = CompositeValidator()
        validation.add(FloatBoundedValidator(lower=5))
        validation.add(FloatBoundedValidator(upper=10))
        self._do_validation_test(validation)

    def test_creation_with_constructor_and_list(self):
        """
        Tests that a composite validator created with the constructor method
        """
        validation = CompositeValidator([FloatBoundedValidator(lower=5), FloatBoundedValidator(upper=10)])
        self._do_validation_test(validation)

    def test_composite_validator_with_or_relation(self):
        """A value is accepted when it satisfies either bounded range."""
        validation = CompositeValidator([FloatBoundedValidator(lower=5, upper=10),
                                         FloatBoundedValidator(lower=15, upper=20)],
                                        relation=CompositeRelation.OR)
        test_alg = self._create_test_algorithm(validation)
        prop = test_alg.getProperty("Input")
        # Fix: use assertNotEqual — the assertNotEquals alias is deprecated
        # and removed in Python 3.12.
        self.assertNotEqual(prop.isValid, "")
        test_alg.setProperty("Input", 6.8)
        self.assertEqual(prop.isValid, "")
        test_alg.setProperty("Input", 17.3)
        self.assertEqual(prop.isValid, "")
        # Values outside both ranges must be rejected.
        self.assertRaises(ValueError, test_alg.setProperty, "Input", 3.0)
        self.assertRaises(ValueError, test_alg.setProperty, "Input", 13.0)
        self.assertRaises(ValueError, test_alg.setProperty, "Input", 23.0)

    def _do_validation_test(self, validation):
        """Run the validator tests"""
        test_alg = self._create_test_algorithm(validation)
        prop = test_alg.getProperty("Input")
        # Fix: deprecated assertNotEquals alias replaced (see above).
        self.assertNotEqual(prop.isValid, "")
        test_alg.setProperty("Input", 6.8)
        self.assertEqual(prop.isValid, "")
        self.assertRaises(ValueError, test_alg.setProperty, "Input", 15)

    def _create_test_algorithm(self, validator):
        """Create a test algorithm"""
        class TestAlgorithm(PythonAlgorithm):
            def PyInit(self):
                self.declareProperty("Input", -1.0, validator)

            def PyExec(self):
                pass

        alg = TestAlgorithm()
        alg.initialize()
        return alg
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
everhopingandwaiting/tg | tg-test.py | 199 | 1450 | import tgl
import pprint
from functools import partial
# Telegram user id of this account; filled in by the on_our_id callback.
our_id = 0
pp = pprint.PrettyPrinter(indent=4)
# Set to True once tgl finishes replaying its binlog; messages seen before
# that are history replays, not live traffic.
binlog_done = False;
def on_binlog_replay_end():
    """tgl callback: the stored binlog has been fully replayed.

    From this point on, incoming messages are live traffic.
    """
    # Bug fix: without the ``global`` declaration the assignment created a
    # new function-local variable, and the module-level flag stayed False
    # forever (so on_msg_receive kept treating everything as replayed).
    global binlog_done
    binlog_done = True
def on_get_difference_end():
    """tgl callback: server-state difference sync finished; intentionally a no-op."""
    pass
def on_our_id(id):
    """tgl callback: receive this account's Telegram user id.

    Stores the id in the module-level ``our_id`` and returns a short
    status string (displayed by tgl).
    """
    # Bug fix: without the ``global`` declaration the assignment only
    # created a function-local, so the module-level our_id stayed 0 and
    # direct-message detection in on_msg_receive never worked.
    global our_id
    our_id = id
    return "Set ID: " + str(our_id)
def msg_cb(success, msg):
    """Generic send-message callback: pretty-print the result and message."""
    for value in (success, msg):
        pp.pprint(value)
# Number of messages fetched per history page.
HISTORY_QUERY_SIZE = 100
def history_cb(msg_list, peer, success, msgs):
    """tgl history callback: accumulate one page of messages into msg_list.

    When a full page was returned there may be more history, so the next
    page is requested recursively (offset = messages collected so far).
    """
    print(len(msgs))
    msg_list.extend(msgs)
    print(len(msg_list))
    if len(msgs) == HISTORY_QUERY_SIZE:
        tgl.get_history(peer, len(msg_list), HISTORY_QUERY_SIZE, partial(history_cb, msg_list, peer));
def cb(success):
    """Minimal tgl callback: just print the success flag."""
    print(success)
def on_msg_receive(msg):
    """tgl callback: handle an incoming (or replayed outgoing) message.

    Ignores our own messages during binlog replay, works out the peer to
    answer (sender for direct messages, chat for group messages), dumps
    the message, and replies "PONG! ..." to any message starting with
    "!ping".

    NOTE(review): relies on the module-level ``binlog_done``/``our_id``
    globals being updated by the other callbacks — verify those callbacks
    actually declare them ``global``.
    """
    if msg.out and not binlog_done:
        return;
    if msg.dest.id == our_id: # direct message
        peer = msg.src
    else: # chatroom
        peer = msg.dest
    pp.pprint(msg)
    if msg.text.startswith("!ping"):
        peer.send_msg("PONG! google.com", preview=False, reply=msg.id)
def on_secret_chat_update(peer, types):
    """tgl callback: secret chat state changed; returns a debug marker string."""
    return "on_secret_chat_update"
def on_user_update():
    """tgl callback: user info changed; intentionally a no-op."""
    pass
def on_chat_update():
    """tgl callback: chat info changed; intentionally a no-op."""
    pass
# Set callbacks
# Register every handler with tgl so the client library can dispatch
# incoming events to the functions defined above.
tgl.set_on_binlog_replay_end(on_binlog_replay_end)
tgl.set_on_get_difference_end(on_get_difference_end)
tgl.set_on_our_id(on_our_id)
tgl.set_on_msg_receive(on_msg_receive)
tgl.set_on_secret_chat_update(on_secret_chat_update)
tgl.set_on_user_update(on_user_update)
tgl.set_on_chat_update(on_chat_update)
| gpl-2.0 |
DanielNeugebauer/adhocracy | src/adhocracy/migration/versions/013_Remove_issues.py | 4 | 2793 | from datetime import datetime
from sqlalchemy import *
from migrate import *
meta = MetaData()
# Polymorphic base table for delegateable content; the 'type' column
# discriminates subtypes (e.g. 'issue').
delegateable_table = Table('delegateable', meta,
    Column('id', Integer, primary_key=True),
    Column('label', Unicode(255), nullable=False),
    Column('type', String(50)),
    Column('create_time', DateTime, default=datetime.utcnow),
    Column('access_time', DateTime, default=datetime.utcnow, onupdate=datetime.utcnow),
    Column('delete_time', DateTime, nullable=True),
    Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
    Column('instance_id', Integer, ForeignKey('instance.id'), nullable=False)
    )
# Comments attach to a delegateable via topic_id and may form reply trees.
comment_table = Table('comment', meta,
    Column('id', Integer, primary_key=True),
    Column('create_time', DateTime, default=datetime.utcnow),
    Column('delete_time', DateTime, default=None, nullable=True),
    Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
    Column('topic_id', Integer, ForeignKey('delegateable.id'), nullable=False),
    Column('canonical', Boolean, default=False),
    Column('wiki', Boolean, default=False),
    Column('reply_id', Integer, ForeignKey('comment.id'), nullable=True),
    Column('poll_id', Integer, ForeignKey('poll.id'), nullable=True)
    )
# Legacy table dropped by this migration.
issue_table = Table('issue', meta,
    Column('id', Integer, ForeignKey('delegateable.id'), primary_key=True),
    Column('comment_id', Integer, ForeignKey('comment.id'), nullable=True)
    )
# Parent/child adjacency list between delegateables.
category_graph = Table('category_graph', meta,
    Column('parent_id', Integer, ForeignKey('delegateable.id')),
    Column('child_id', Integer, ForeignKey('delegateable.id'))
    )
# Polls are scoped to a delegateable via scope_id.
poll_table = Table('poll', meta,
    Column('id', Integer, primary_key=True),
    Column('begin_time', DateTime, default=datetime.utcnow),
    Column('end_time', DateTime, nullable=True),
    Column('user_id', Integer, ForeignKey('user.id'), nullable=False),
    Column('action', Unicode(50), nullable=False),
    Column('subject', UnicodeText(), nullable=False),
    Column('scope_id', Integer, ForeignKey('delegateable.id'), nullable=False)
    )
def upgrade(migrate_engine):
    """Drop the issue table and purge every issue-typed delegateable.

    For each delegateable whose type discriminator (column index 2) is
    'issue', dependent rows are deleted first, then the delegateable row
    itself.
    """
    meta.bind = migrate_engine
    issue_table.drop()
    for row in migrate_engine.execute(delegateable_table.select()):
        delegateable_id = row[0]
        if row[2] != 'issue':
            continue
        # Delete dependents before the delegateable row itself, in the
        # same order as the original migration.
        cleanup_statements = [
            category_graph.delete(category_graph.c.parent_id == delegateable_id),
            category_graph.delete(category_graph.c.child_id == delegateable_id),
            comment_table.delete(comment_table.c.topic_id == delegateable_id),
            poll_table.delete(poll_table.c.scope_id == delegateable_id),
            delegateable_table.delete(delegateable_table.c.id == delegateable_id),
        ]
        for statement in cleanup_statements:
            migrate_engine.execute(statement)
def downgrade(migrate_engine):
    # This migration deletes data irrecoverably, so downgrading is
    # deliberately unsupported.
    raise NotImplementedError()
| agpl-3.0 |
Eseoghene/bite-project | deps/mrtaskman/server/mapreduce/lib/files/testutil.py | 44 | 1888 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Testing utils for writing tests involving Files API."""
# Only the stub class is part of this helper module's public API.
__all__ = ['TestFileServiceStub']
from google.appengine.api import apiproxy_stub
class TestFileServiceStub(apiproxy_stub.APIProxyStub):
  """In-memory stand-in for the Files API service, for use in tests.

  Performs no validation and keeps every file's content in a plain dict,
  so only low-level file calls can be exercised (advanced features such
  as blobstore-backed files are not supported).
  """

  def __init__(self):
    super(TestFileServiceStub, self).__init__('file')
    # Maps filename -> accumulated content string.
    self._file_content = {}

  def _Dynamic_Open(self, request, response):
    # Opening is a no-op for the in-memory stub.
    pass

  def _Dynamic_Close(self, request, response):
    # Closing is a no-op for the in-memory stub.
    pass

  def _Dynamic_Append(self, request, response):
    # Append the request payload to whatever is already stored.
    filename = request.filename()
    self._file_content[filename] = self.get_content(filename) + request.data()

  def _Dynamic_Read(self, request, response):
    # Return at most max_bytes() of content starting at pos().
    start = request.pos()
    end = start + request.max_bytes()
    response.set_data(self._file_content[request.filename()][start:end])

  def get_content(self, filename):
    """Return the current in-memory content ('' for unknown files)."""
    return self._file_content.get(filename, '')

  def set_content(self, filename, content):
    """Replace the current in-memory content of *filename*."""
    self._file_content[filename] = content
| apache-2.0 |
sunze/py_flask | venv/lib/python3.4/site-packages/pip/utils/build.py | 899 | 1312 | from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
    """Context manager around a (possibly temporary) build directory.

    When no *name* is supplied a fresh temporary directory is created;
    unless the caller explicitly opts out via *delete*, the directory is
    removed again by cleanup() (invoked on context-manager exit).
    """

    def __init__(self, name=None, delete=None):
        # Deleting defaults to True whenever the caller did not decide.
        # (Behaviorally identical to the original two-step default logic.)
        self.delete = True if delete is None else delete
        if name is None:
            # realpath() because some systems symlink their default tmpdir
            # to another directory, which tends to confuse build scripts.
            name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
        self.name = name

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        # Only remove the directory if deletion was requested (or defaulted).
        if self.delete:
            rmtree(self.name)
| mit |
h2educ/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    """IncrementalPCA on a dense array: shape, explained variance vs PCA,
    and covariance/precision consistency for several n_components."""
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    # Loose tolerance (1 decimal): incremental estimates only approximate PCA.
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)

    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        # Covariance times its precision must be (approximately) identity.
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    """Projection of held-out data lands on the dominant component."""
    # Test that the projection of data is correct.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)

    # Normalize
    Yt /= np.sqrt((Yt ** 2).sum())

    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    """transform followed by inverse_transform approximately recovers X."""
    # Test that the projection of data can be inverted.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
    """Invalid n_components (negative, zero, fractional, > n_features)
    are rejected at fit time with ValueError."""
    # Test that n_components is >=1 and <= n_features.
    X = [[0, 1], [1, 0]]
    for n_components in [-1, 0, .99, 3]:
        assert_raises(ValueError, IncrementalPCA(n_components,
                                                 batch_size=10).fit, X)
def test_incremental_pca_set_params():
    """Changing n_components via set_params between partial_fit calls is
    rejected; restoring the original setting works again."""
    # Test that set_params(n_components=...) mid-stream raises ValueError.
    # (The previous comment here was copy-pasted from the batch-signs test.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    """partial_fit with a different number of features raises ValueError."""
    # Test that changing n_features between calls will raise an error.
    # (The previous comment said "n_components", but the arrays below differ
    # in feature count, not component count.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    X = rng.randn(n_samples, 20)
    X2 = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(X)
    assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
    """components_ signs agree across a range of batch sizes."""
    # Test that components_ sign is stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(10, 20)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)

    # Compare each consecutive pair of fitted component matrices.
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
    """components_ values agree (loosely) across a range of batch sizes."""
    # Test that components_ values are stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)

    # Only 1 decimal: values drift slightly with batch size, signs should not.
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
    """fit() and an equivalent sequence of partial_fit() calls agree."""
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    """IncrementalPCA approximates PCA on iris (up to component sign)."""
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    X = iris.data

    Y_pca = PCA(n_components=2).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)

    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
    """IncrementalPCA approximates PCA on random data (up to sign)."""
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)

    Y_pca = PCA(n_components=3).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)

    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
    """PCA and IncrementalPCA report matching explained/noise variances."""
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    """Whitened PCA and IncrementalPCA transforms match up to sign, and
    both inverse transforms recover X."""
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)

        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
haojin2/peloton | script/formatting/formatter.py | 1 | 6784 | #!/usr/bin/env python
# encoding: utf-8
## ==============================================
## GOAL : Format code, Update headers
## ==============================================
import argparse
import logging
import os
import re
import sys
import datetime
import subprocess
## ==============================================
## CONFIGURATION
## ==============================================
# NOTE: absolute path to peloton directory is calculated from current directory
# directory structure: peloton/scripts/formatting/<this_file>
# PELOTON_DIR needs to be redefined if the directory structure is changed
CODE_SOURCE_DIR = os.path.abspath(os.path.dirname(__file__))
PELOTON_DIR = reduce(os.path.join, [CODE_SOURCE_DIR, os.path.pardir, os.path.pardir])
#other directory paths used are relative to peloton_dir
PELOTON_SRC_DIR = os.path.join(PELOTON_DIR, "src")
PELOTON_TESTS_DIR = os.path.join(PELOTON_DIR, "test")
# DEFAULT DIRS
DEFAULT_DIRS = []
DEFAULT_DIRS.append(PELOTON_SRC_DIR)
DEFAULT_DIRS.append(PELOTON_TESTS_DIR)
CLANG_FORMAT = "clang-format-3.6"
## ==============================================
## HEADER CONFIGURATION
## ==============================================
#header framework, dynamic information will be added inside function
header_comment_line_1 = "//===----------------------------------------------------------------------===//\n"
header_comment_line_1 += "//\n"
header_comment_line_1 += "// Peloton\n"
header_comment_line_2 = "//\n"
header_comment_line_3 = "// "
header_comment_line_4 = "//\n"
header_comment_line_5 = "// Identification: "
header_comment_line_6 = "//\n"
header_comment_line_7 = "// Copyright (c) 2015-%d, Carnegie Mellon University Database Group\n" % datetime.datetime.now().year
header_comment_line_8 = "//\n"
header_comment_line_9 = "//===----------------------------------------------------------------------===//\n\n\n"
header_comment_1 = header_comment_line_1 + header_comment_line_2
header_comment_3 = header_comment_line_4
header_comment_5 = header_comment_line_6 + header_comment_line_7 + header_comment_line_8 \
+ header_comment_line_9
#regular expresseion used to track header
header_regex = re.compile("((\/\/===-*===\/\/\n(\/\/.*\n)*\/\/===-*===\/\/[\n]*)\n\n)*")
## ==============================================
## LOGGING CONFIGURATION
## ==============================================

# Module-level logger: timestamped, compact format, INFO and above.
# (StreamHandler with no argument writes to stderr.)
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
    fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
    datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## UTILITY FUNCTION DEFINITIONS
## ==============================================
#format the file passed as argument
def format_file(file_path, update_header, clang_format_code):
    """Apply the selected action(s) to a single file.

    Both actions may be requested at once: the license header is rewritten
    first, then clang-format is run on the result.  (The previous version
    used 'elif', which silently dropped -c whenever -u was also given, even
    though the CLI help advertises applying "the selected action(s)".)

    :param file_path: path of the file to process
    :param update_header: strip any existing Peloton header and add a fresh one
    :param clang_format_code: run clang-format -i on the file
    """
    file_name = os.path.basename(file_path)
    abs_path = os.path.abspath(file_path)
    rel_path_from_peloton_dir = os.path.relpath(abs_path, PELOTON_DIR)

    if update_header:
        with open(file_path, "r+") as fd:
            file_data = fd.read()
            # strip old header if it exists
            header_match = header_regex.match(file_data)
            if not header_match is None:
                LOG.info("Strip header from %s", file_name)
                header_comment = header_match.group()
                LOG.debug("Header comment : %s", header_comment)
                file_data = file_data.replace(header_comment, "")
            # add new header, embedding the file name and repo-relative path
            LOG.info("Add header to %s", file_name)
            header_comment_2 = header_comment_line_3 + file_name + "\n"
            header_comment_4 = header_comment_line_5 + rel_path_from_peloton_dir + "\n"
            header_comment = header_comment_1 + header_comment_2 + header_comment_3 \
                             + header_comment_4 + header_comment_5
            file_data = header_comment + file_data
            # rewrite the file in place (no redundant close: 'with' handles it)
            fd.seek(0, 0)
            fd.truncate()
            fd.write(file_data)

    if clang_format_code:
        # Run after the header rewrite so the new header gets formatted too;
        # the file handle is already closed at this point.
        formatting_command = CLANG_FORMAT + " -style=file " + " -i " + file_path
        LOG.info(formatting_command)
        subprocess.call([CLANG_FORMAT, "-style=file", "-i", file_path])
#END FORMAT__FILE(FILE_NAME)
#format all the files in the dir passed as argument
def format_dir(dir_path, update_header, clang_format_code):
    """Recursively apply the requested action(s) to every .h/.cpp file
    under dir_path by delegating to format_file()."""
    for current_dir, _dirs, file_names in os.walk(dir_path):
        for entry in file_names:
            candidate = current_dir + os.path.sep + entry
            # Only C/C++ sources and headers are touched.
            if candidate.endswith((".h", ".cpp")):
                format_file(candidate, update_header, clang_format_code)
#END ADD_HEADERS_DIR(DIR_PATH)
## ==============================================
## Main Function
## ==============================================

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Update headers and/or format source code')
    parser.add_argument("-u", "--update-header", help='Action: Update existing headers or add new ones', action='store_true')
    parser.add_argument("-c", "--clang-format-code", help='Action: Apply clang-format to source code', action='store_true')
    parser.add_argument("-f", "--staged-files", help='Action: Apply the selected action(s) to all staged files (git)', action='store_true')
    parser.add_argument('paths', metavar='PATH', type=str, nargs='*',
                        help='Files or directories to (recursively) apply the actions to')
    args = parser.parse_args()

    # Build the target list: either the files currently staged in git
    # (-f), or the paths given on the command line.
    if args.staged_files:
        targets = [os.path.abspath(os.path.join(PELOTON_DIR, f)) for f in subprocess.check_output(["git", "diff", "--name-only", "HEAD", "--cached", "--diff-filter=d"]).split()]
        if not targets:
            LOG.error("no staged files or not calling from a repository -- exiting")
            sys.exit("no staged files or not calling from a repository")
    elif not args.paths:
        LOG.error("no files or directories given -- exiting")
        sys.exit("no files or directories given")
    else:
        targets = args.paths

    # Apply the selected action(s) to each target, recursing into directories.
    for x in targets:
        if os.path.isfile(x):
            LOG.info("Scanning file: " + x)
            format_file(x, args.update_header, args.clang_format_code)
        elif os.path.isdir(x):
            LOG.info("Scanning directory " + x)
            format_dir(x, args.update_header, args.clang_format_code)
    ## FOR
## IF
| apache-2.0 |
KimNorgaard/ansible-modules-extras | cloud/amazon/ecs_cluster.py | 60 | 7977 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ecs_cluster
short_description: create or terminate ecs clusters
notes:
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- It will also wait for a cluster to have instances registered to it.
description:
- Creates or terminates ecs clusters.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ boto, boto3 ]
options:
state:
description:
- The desired state of the cluster
required: true
choices: ['present', 'absent', 'has_instances']
name:
description:
- The cluster name
required: true
delay:
description:
- Number of seconds to wait
required: false
repeat:
description:
- The number of times to wait for the cluster to have an instance
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Cluster creation
- ecs_cluster:
name: default
state: present
# Cluster deletion
- ecs_cluster:
name: default
state: absent
- name: Wait for register
ecs_cluster:
name: "{{ new_cluster }}"
state: has_instances
delay: 10
repeat: 10
register: task_output
'''
RETURN = '''
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
type: int
clusterArn:
description: the ARN of the cluster just created
type: string (ARN)
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
clusterName:
description: name of the cluster just created (should match the input argument)
type: string
sample: test-cluster-mfshcdok
pendingTasksCount:
description: how many tasks are waiting to run in this cluster
returned: 0 if a new cluster
type: int
registeredContainerInstancesCount:
description: how many container instances are available in this cluster
returned: 0 if a new cluster
type: int
runningTasksCount:
description: how many tasks are running in this cluster
returned: 0 if a new cluster
type: int
status:
description: the status of the new cluster
returned: ACTIVE
type: string
'''
import time
try:
import boto
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class EcsClusterManager:
    """Thin wrapper around the boto3 ECS client for cluster operations.

    NOTE: this file is Python 2 only (``except X, e`` syntax).
    """

    def __init__(self, module):
        # module: the AnsibleModule, used for AWS connection settings and
        # for reporting fatal errors via fail_json().
        self.module = module
        try:
            # self.ecs = boto3.client('ecs')
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound, e:
            self.module.fail_json(msg="Can't authorize connection - "+str(e))

    def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
        """Return the first dict whose field_name value ends with
        cluster_name, or None if there is no match."""
        for c in array_of_clusters:
            if c[field_name].endswith(cluster_name):
                return c
        return None

    def describe_cluster(self, cluster_name):
        """Describe a cluster: return its dict, None if the API reports it
        MISSING, or raise on any other failure."""
        response = self.ecs.describe_clusters(clusters=[
            cluster_name
        ])
        if len(response['failures'])>0:
            c = self.find_in_array(response['failures'], cluster_name, 'arn')
            if c and c['reason']=='MISSING':
                return None
            # fall thru and look through found ones
        if len(response['clusters'])>0:
            c = self.find_in_array(response['clusters'], cluster_name)
            if c:
                return c
        raise Exception("Unknown problem describing cluster %s." % cluster_name)

    def create_cluster(self, clusterName = 'default'):
        """Create a cluster and return its description dict."""
        response = self.ecs.create_cluster(clusterName=clusterName)
        return response['cluster']

    def delete_cluster(self, clusterName):
        """Delete the named cluster; return the raw API response."""
        return self.ecs.delete_cluster(cluster=clusterName)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
name=dict(required=True, type='str' ),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10)
))
required_together = ( ['state', 'name'] )
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO:
module.fail_json(msg='boto is required.')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
cluster_mgr = EcsClusterManager(module)
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
except Exception, e:
module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))
results = dict(changed=False)
if module.params['state'] == 'present':
if existing and 'status' in existing and existing['status']=="ACTIVE":
results['cluster']=existing
else:
if not module.check_mode:
# doesn't exist. create it.
results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
results['changed'] = True
# delete the cluster
elif module.params['state'] == 'absent':
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['cluster'] = existing
if 'status' in existing and existing['status']=="INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
cluster_mgr.delete_cluster(module.params['name'])
results['changed'] = True
elif module.params['state'] == 'has_instances':
if not existing:
module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
delay = module.params['delay']
repeat = module.params['repeat']
time.sleep(delay)
count = 0
for i in range(repeat):
existing = cluster_mgr.describe_cluster(module.params['name'])
count = existing['registeredContainerInstancesCount']
if count > 0:
results['changed'] = True
break
time.sleep(delay)
if count == 0 and i is repeat-1:
module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
return
module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.5/django/middleware/http.py | 225 | 1670 | from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
    """
    Implements conditional GET handling.

    Every response is stamped with a Date header (and, for non-streaming
    responses without one, a Content-Length header).  When the response
    carries an ETag or Last-Modified header satisfying the request's
    If-None-Match / If-Modified-Since precondition, the status code is
    switched to 304; the response-handling path strips the body for that
    status (see http.conditional_content_removal()).
    """
    def process_response(self, request, response):
        response['Date'] = http_date()
        if not response.streaming and not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        # ETag / If-None-Match: setting the status code is sufficient.
        if (response.has_header('ETag') and
                request.META.get('HTTP_IF_NONE_MATCH') == response['ETag']):
            response.status_code = 304

        # Last-Modified / If-Modified-Since: both timestamps must parse.
        if response.has_header('Last-Modified'):
            ims_raw = request.META.get('HTTP_IF_MODIFIED_SINCE')
            ims = parse_http_date_safe(ims_raw) if ims_raw is not None else None
            if ims is not None:
                last_modified = parse_http_date_safe(response['Last-Modified'])
                if last_modified is not None and last_modified <= ims:
                    # Same reasoning as above: 304 with no body rewrite here.
                    response.status_code = 304

        return response
| mit |
ehashman/oh-mainline | vendor/packages/twisted/twisted/protocols/amp.py | 18 | 85502 | # -*- test-case-name: twisted.test.test_amp -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements AMP, the Asynchronous Messaging Protocol.
AMP is a protocol for sending multiple asynchronous request/response pairs over
the same connection. Requests and responses are both collections of key/value
pairs.
AMP is a very simple protocol which is not an application. This module is a
"protocol construction kit" of sorts; it attempts to be the simplest wire-level
implementation of Deferreds. AMP provides the following base-level features:
- Asynchronous request/response handling (hence the name)
- Requests and responses are both key/value pairs
- Binary transfer of all data: all data is length-prefixed. Your
application will never need to worry about quoting.
- Command dispatching (like HTTP Verbs): the protocol is extensible, and
multiple AMP sub-protocols can be grouped together easily.
The protocol implementation also provides a few additional features which are
not part of the core wire protocol, but are nevertheless very useful:
- Tight TLS integration, with an included StartTLS command.
- Handshaking to other protocols: because AMP has well-defined message
boundaries and maintains all incoming and outgoing requests for you, you
can start a connection over AMP and then switch to another protocol.
This makes it ideal for firewall-traversal applications where you may
have only one forwarded port but multiple applications that want to use
it.
Using AMP with Twisted is simple. Each message is a command, with a response.
You begin by defining a command type. Commands specify their input and output
in terms of the types that they expect to see in the request and response
key-value pairs. Here's an example of a command that adds two integers, 'a'
and 'b'::
class Sum(amp.Command):
arguments = [('a', amp.Integer()),
('b', amp.Integer())]
response = [('total', amp.Integer())]
Once you have specified a command, you need to make it part of a protocol, and
define a responder for it. Here's a 'JustSum' protocol that includes a
responder for our 'Sum' command::
class JustSum(amp.AMP):
def sum(self, a, b):
total = a + b
print 'Did a sum: %d + %d = %d' % (a, b, total)
return {'total': total}
Sum.responder(sum)
Later, when you want to actually do a sum, the following expression will return
a L{Deferred} which will fire with the result::
ClientCreator(reactor, amp.AMP).connectTCP(...).addCallback(
lambda p: p.callRemote(Sum, a=13, b=81)).addCallback(
lambda result: result['total'])
Command responders may also return Deferreds, causing the response to be
sent only once the Deferred fires::
class DelayedSum(amp.AMP):
def slowSum(self, a, b):
total = a + b
result = defer.Deferred()
reactor.callLater(3, result.callback, {'total': total})
return result
Sum.responder(slowSum)
This is transparent to the caller.
You can also define the propagation of specific errors in AMP. For example,
for the slightly more complicated case of division, we might have to deal with
division by zero::
class Divide(amp.Command):
arguments = [('numerator', amp.Integer()),
('denominator', amp.Integer())]
response = [('result', amp.Float())]
errors = {ZeroDivisionError: 'ZERO_DIVISION'}
The 'errors' mapping here tells AMP that if a responder to Divide emits a
L{ZeroDivisionError}, then the other side should be informed that an error of
the type 'ZERO_DIVISION' has occurred. Writing a responder which takes
advantage of this is very simple - just raise your exception normally::
class JustDivide(amp.AMP):
def divide(self, numerator, denominator):
result = numerator / denominator
print 'Divided: %d / %d = %d' % (numerator, denominator, total)
return {'result': result}
Divide.responder(divide)
On the client side, the errors mapping will be used to determine what the
'ZERO_DIVISION' error means, and translated into an asynchronous exception,
which can be handled normally as any L{Deferred} would be::
def trapZero(result):
result.trap(ZeroDivisionError)
print "Divided by zero: returning INF"
return 1e1000
ClientCreator(reactor, amp.AMP).connectTCP(...).addCallback(
lambda p: p.callRemote(Divide, numerator=1234,
denominator=0)
).addErrback(trapZero)
For a complete, runnable example of both of these commands, see the files in
the Twisted repository::
doc/core/examples/ampserver.py
doc/core/examples/ampclient.py
On the wire, AMP is a protocol which uses 2-byte lengths to prefix keys and
values, and empty keys to separate messages::
<2-byte length><key><2-byte length><value>
<2-byte length><key><2-byte length><value>
...
<2-byte length><key><2-byte length><value>
<NUL><NUL> # Empty Key == End of Message
And so on. Because it's tedious to refer to lengths and NULs constantly, the
documentation will refer to packets as if they were newline delimited, like
so::
C: _command: sum
C: _ask: ef639e5c892ccb54
C: a: 13
C: b: 81
S: _answer: ef639e5c892ccb54
S: total: 94
Notes:
In general, the order of keys is arbitrary. Specific uses of AMP may impose an
ordering requirement, but unless this is specified explicitly, any ordering may
be generated and any ordering must be accepted. This applies to the
command-related keys I{_command} and I{_ask} as well as any other keys.
Values are limited to the maximum encodable size in a 16-bit length, 65535
bytes.
Keys are limited to the maximum encodable size in a 8-bit length, 255 bytes.
Note that we still use 2-byte lengths to encode keys. This small redundancy
has several features:
- If an implementation becomes confused and starts emitting corrupt data,
or gets keys confused with values, many common errors will be signalled
immediately instead of delivering obviously corrupt packets.
- A single NUL will separate every key, and a double NUL separates
messages. This provides some redundancy when debugging traffic dumps.
- NULs will be present at regular intervals along the protocol, providing
some padding for otherwise braindead C implementations of the protocol,
so that <stdio.h> string functions will see the NUL and stop.
- This makes it possible to run an AMP server on a port also used by a
plain-text protocol, and easily distinguish between non-AMP clients (like
web browsers) which issue non-NUL as the first byte, and AMP clients,
which always issue NUL as the first byte.
"""
__metaclass__ = type
import types, warnings
from cStringIO import StringIO
from struct import pack
import decimal, datetime
from zope.interface import Interface, implements
from twisted.python.compat import set
from twisted.python.util import unsignedID
from twisted.python.reflect import accumulateClassDict
from twisted.python.failure import Failure
from twisted.python import log, filepath
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.error import PeerVerifyError, ConnectionLost
from twisted.internet.error import ConnectionClosed
from twisted.internet.defer import Deferred, maybeDeferred, fail
from twisted.protocols.basic import Int16StringReceiver, StatefulStringProtocol
try:
from twisted.internet import ssl
except ImportError:
ssl = None
if ssl and not ssl.supported:
ssl = None
if ssl is not None:
from twisted.internet.ssl import CertificateOptions, Certificate, DN, KeyPair
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
UNKNOWN_ERROR_CODE = 'UNKNOWN'
UNHANDLED_ERROR_CODE = 'UNHANDLED'
MAX_KEY_LENGTH = 0xff
MAX_VALUE_LENGTH = 0xffff
class IArgumentType(Interface):
    """
    An L{IArgumentType} can serialize a Python object into an AMP box and
    deserialize information from an AMP box back into a Python object.

    The two methods are inverses: C{toBox} writes the serialized strings
    that C{fromBox} reads back into Python objects.

    @since: 9.0
    """
    def fromBox(name, strings, objects, proto):
        """
        Given an argument name and an AMP box containing serialized values,
        extract one or more Python objects and add them to the C{objects}
        dictionary.

        @param name: The name associated with this argument.  Most commonly,
            this is the key which can be used to find a serialized value in
            C{strings} and which should be used as the key in C{objects} to
            associate with a structured Python object.
        @type name: C{str}

        @param strings: The AMP box from which to extract one or more
            values.
        @type strings: C{dict}

        @param objects: The output dictionary to populate with the value for
            this argument.
        @type objects: C{dict}

        @param proto: The protocol instance which received the AMP box being
            interpreted.  Most likely this is an instance of L{AMP}, but
            this is not guaranteed.

        @return: C{None}
        """

    def toBox(name, strings, objects, proto):
        """
        Given an argument name and a dictionary containing structured Python
        objects, serialize values into one or more strings and add them to
        the C{strings} dictionary.

        @param name: The name associated with this argument.  Most commonly,
            this is the key which can be used to find an object in
            C{objects} and which should be used as the key in C{strings} to
            associate with a C{str} giving the serialized form of that
            object.
        @type name: C{str}

        @param strings: The AMP box into which to insert one or more
            strings.
        @type strings: C{dict}

        @param objects: The input dictionary from which to extract Python
            objects to serialize.
        @type objects: C{dict}

        @param proto: The protocol instance which will send the AMP box once
            it is fully populated.  Most likely this is an instance of
            L{AMP}, but this is not guaranteed.

        @return: C{None}
        """
class IBoxSender(Interface):
    """
    A transport which can send L{AmpBox} objects.
    """
    # zope.interface declarations: methods omit 'self'.
    def sendBox(box):
        """
        Send an L{AmpBox}.
        @param box: the L{AmpBox} to transmit.
        @raise ProtocolSwitched: if the underlying protocol has been
        switched.
        @raise ConnectionLost: if the underlying connection has already been
        lost.
        """
    def unhandledError(failure):
        """
        An unhandled error occurred in response to a box.  Log it
        appropriately.
        @param failure: a L{Failure} describing the error that occurred.
        """
class IBoxReceiver(Interface):
    """
    An application object which can receive L{AmpBox} objects and dispatch them
    appropriately.
    """
    # zope.interface declarations: methods omit 'self'.
    def startReceivingBoxes(boxSender):
        """
        The L{ampBoxReceived} method will start being called; boxes may be
        responded to by responding to the given L{IBoxSender}.
        @param boxSender: an L{IBoxSender} provider.
        """
    def ampBoxReceived(box):
        """
        A box was received from the transport; dispatch it appropriately.
        """
    def stopReceivingBoxes(reason):
        """
        No further boxes will be received on this connection.
        @type reason: L{Failure}
        """
class IResponderLocator(Interface):
    """
    An application object which can look up appropriate responder methods for
    AMP commands.
    """
    # NOTE(review): unlike the other interfaces in this module, this
    # declaration includes 'self', which makes zope.interface treat it as a
    # required argument of the interface signature.  Looks like a historical
    # wart rather than intent -- confirm before changing.
    def locateResponder(self, name):
        """
        Locate a responder method appropriate for the named command.
        @param name: the wire-level name (commandName) of the AMP command to be
        responded to.
        @return: a 1-argument callable that takes an L{AmpBox} with argument
        values for the given command, and returns an L{AmpBox} containing
        argument values for the named command, or a L{Deferred} that fires the
        same.
        """
class AmpError(Exception):
    """
    The root of the exception hierarchy for everything AMP-related in this
    module; catch this to handle any AMP failure.
    """
class ProtocolSwitched(Exception):
    """
    Raised on an attempt to send AMP traffic over a connection that has
    already been switched to carry another protocol.
    """
class OnlyOneTLS(AmpError):
    """
    Raised when TLS startup is attempted more than once on a single
    connection; the implementation only supports starting it once.
    """
class NoEmptyBoxes(AmpError):
    """
    Raised upon receipt (or attempted transmission) of a box that carries
    none of the protocol keys -- such boxes are meaningless in AMP.
    """
class InvalidSignature(AmpError):
    """
    Raised when a command is invoked without all of its required arguments.
    """
class TooLong(AmpError):
    """
    A string exceeded one of the protocol's length limits (255 bytes for
    keys, 65535 bytes for values).
    @ivar isKey: true if the oversized string was in a key position, false
    if it was in a value position.
    @ivar isLocal: true if the string was being encoded locally, false if it
    arrived over the network.
    @ivar value: the offending string.
    @ivar keyName: for a value-position string, the key it was being encoded
    under; C{None} otherwise.
    """
    def __init__(self, isKey, isLocal, value, keyName=None):
        AmpError.__init__(self)
        self.isKey = isKey
        self.isLocal = isLocal
        self.value = value
        self.keyName = keyName
    def __repr__(self):
        # e.g. "local value 'foo' too long: 70000"
        if self.isKey:
            what = "key"
        else:
            what = "value" + ' ' + repr(self.keyName)
        where = "local" if self.isLocal else "remote"
        return "%s %s too long: %d" % (where, what, len(self.value))
class BadLocalReturn(AmpError):
    """
    A local responder returned a value that could not be coerced into a
    valid response.
    @ivar message: a description of what went wrong.
    @ivar enclosed: the L{Failure} captured while serializing the value.
    """
    def __init__(self, message, enclosed):
        AmpError.__init__(self)
        self.message = message
        self.enclosed = enclosed
    def __repr__(self):
        # Include the captured traceback so the bad value is diagnosable.
        return "%s %s" % (self.message, self.enclosed.getBriefTraceback())
    __str__ = __repr__
class RemoteAmpError(AmpError):
    """
    Something went wrong on the remote end of the connection; the error was
    serialized there and transmitted to us.
    @ivar errorCode: the AMP error code.
    @ivar description: human-readable text describing the error.
    @ivar fatal: true if this error should terminate the connection.
    @ivar local: a local L{Failure}, if one exists.
    """
    def __init__(self, errorCode, description, fatal=False, local=None):
        """
        Create a remote error from an error code and description.
        @param errorCode: the AMP error code of this error.
        @param description: some text to show to the user.
        @param fatal: a boolean, true if this error should terminate the
        connection.
        @param local: a local Failure, if one exists.
        """
        marker = ' (local)' if local else ''
        tail = local.getBriefTraceback() if local else ''
        Exception.__init__(self, "Code<%s>%s: %s%s" % (
            errorCode, marker, description, tail))
        self.local = local
        self.errorCode = errorCode
        self.description = description
        self.fatal = fatal
class UnknownRemoteError(RemoteAmpError):
    """
    The other side raised an error whose type we could not identify; it is
    reported under the catch-all L{UNKNOWN_ERROR_CODE}.
    """
    def __init__(self, description):
        RemoteAmpError.__init__(self, UNKNOWN_ERROR_CODE, description)
class MalformedAmpBox(AmpError):
    """
    Raised when the wire-level encoding of a box was malformed.
    """
class UnhandledCommand(AmpError):
    """
    Raised when a received command has no responder to dispatch it to.
    """
class IncompatibleVersions(AmpError):
    """
    Raised when no protocol version compatible with the peer could be
    negotiated.
    """
PROTOCOL_ERRORS = {UNHANDLED_ERROR_CODE: UnhandledCommand}
class AmpBox(dict):
    """
    A single packet in the AMP protocol: a str-to-str dictionary with a
    defined wire encoding.
    """
    # Behave like a plain dict: no per-instance __dict__.
    __slots__ = []
    def copy(self):
        """
        Return a new box of the same class with the same contents.
        """
        duplicate = self.__class__()
        duplicate.update(self)
        return duplicate
    def serialize(self):
        """
        Encode this box for the wire: for each key/value pair (in sorted key
        order), a 16-bit big-endian length followed by the key bytes, then a
        16-bit length followed by the value bytes; a zero-length key
        terminates the box.
        @return: the encoded C{str}.
        @raise TypeError: if any key or value is a C{unicode} string.
        @raise TooLong: if a key exceeds L{MAX_KEY_LENGTH} or a value exceeds
        L{MAX_VALUE_LENGTH}.
        """
        out = []
        for k, v in sorted(self.items()):
            # Exact type check (not isinstance): matches the original intent
            # of rejecting unicode while letting str subclasses through.
            if type(k) == unicode:
                raise TypeError("Unicode key not allowed: %r" % k)
            if type(v) == unicode:
                raise TypeError(
                    "Unicode value for key %r not allowed: %r" % (k, v))
            if len(k) > MAX_KEY_LENGTH:
                raise TooLong(True, True, k, None)
            if len(v) > MAX_VALUE_LENGTH:
                raise TooLong(False, True, v, k)
            out.append(pack("!H", len(k)))
            out.append(k)
            out.append(pack("!H", len(v)))
            out.append(v)
        out.append(pack("!H", 0))
        return ''.join(out)
    def _sendTo(self, proto):
        """
        Serialize and send this box to an AMP instance.  By the time a box is
        sent it must carry exactly ONE of the keys C{_ask}, C{_answer} or
        C{_error}; if C{_ask} is set, C{_command} must be set too.
        @param proto: an AMP instance.
        """
        proto.sendBox(self)
    def __repr__(self):
        return 'AmpBox(%s)' % (dict.__repr__(self),)
# Backwards-compatible alias: amp.Box => AmpBox
Box = AmpBox
class QuitBox(AmpBox):
    """
    An L{AmpBox} which drops the connection as soon as it has been sent.
    """
    __slots__ = []
    def __repr__(self):
        inner = super(QuitBox, self).__repr__()
        return 'QuitBox(**%s)' % (inner,)
    def _sendTo(self, proto):
        """
        Send this box normally, then immediately close the transport.
        """
        AmpBox._sendTo(self, proto)
        proto.transport.loseConnection()
class _SwitchBox(AmpBox):
    """
    Implementation detail of ProtocolSwitchCommand: an L{AmpBox} which,
    once sent, switches the connection over to a new protocol.
    """
    # No __slots__ here: this class carries an extra attribute (innerProto).
    def __init__(self, innerProto, **kw):
        """
        Remember the protocol to switch to after this box is sent.
        @param innerProto: the protocol instance to switch to.
        @type innerProto: an IProtocol provider.
        """
        AmpBox.__init__(self, **kw)
        self.innerProto = innerProto
    def __repr__(self):
        return '_SwitchBox(%r, **%s)' % (self.innerProto,
                                         dict.__repr__(self),)
    def _sendTo(self, proto):
        """
        Send this box as the final AMP box on the connection, then lock the
        protocol and hand the transport to the inner protocol.
        """
        AmpBox._sendTo(self, proto)
        proto._lockForSwitch()
        proto._switchTo(self.innerProto)
class BoxDispatcher:
    """
    A L{BoxDispatcher} dispatches '_ask', '_answer', and '_error' L{AmpBox}es,
    both incoming and outgoing, to their appropriate destinations.
    Outgoing commands are converted into L{Deferred}s and outgoing boxes, and
    associated tracking state to fire those L{Deferred} when '_answer' boxes
    come back.  Incoming '_answer' and '_error' boxes are converted into
    callbacks and errbacks on those L{Deferred}s, respectively.
    Incoming '_ask' boxes are converted into method calls on a supplied method
    locator.
    @ivar _outstandingRequests: a dictionary mapping request IDs to
    L{Deferred}s which were returned for those requests.
    @ivar locator: an object with a L{locateResponder} method that locates a
    responder function that takes a Box and returns a result (either a Box or a
    Deferred which fires one).
    @ivar boxSender: an object which can send boxes, via the L{_sendBox}
    method, such as an L{AMP} instance.
    @type boxSender: L{IBoxSender}
    """
    implements(IBoxReceiver)
    # Failure that terminated this dispatcher, or None while still usable.
    _failAllReason = None
    # tag -> Deferred for requests awaiting an answer; set per-instance in
    # __init__, becomes None permanently after failAllOutgoing.
    _outstandingRequests = None
    # Monotonic counter used to generate unique _ask tags (py2 long literal).
    _counter = 0L
    boxSender = None
    def __init__(self, locator):
        self._outstandingRequests = {}
        self.locator = locator
    def startReceivingBoxes(self, boxSender):
        """
        The given boxSender is going to start calling boxReceived on this
        L{BoxDispatcher}.
        @param boxSender: The L{IBoxSender} to send command responses to.
        """
        self.boxSender = boxSender
    def stopReceivingBoxes(self, reason):
        """
        No further boxes will be received here.  Terminate all currently
        outstanding command deferreds with the given reason.
        """
        self.failAllOutgoing(reason)
    def failAllOutgoing(self, reason):
        """
        Call the errback on all outstanding requests awaiting responses.
        @param reason: the Failure instance to pass to those errbacks.
        """
        # Record the reason so subsequent callRemote attempts fail fast.
        self._failAllReason = reason
        OR = self._outstandingRequests.items()
        self._outstandingRequests = None # we can never send another request
        for key, value in OR:
            value.errback(reason)
    def _nextTag(self):
        """
        Generate protocol-local serial numbers for _ask keys.
        @return: a string that has not yet been used on this connection.
        """
        self._counter += 1
        # Hex keeps the wire tag short for large counters.
        return '%x' % (self._counter,)
    def _sendBoxCommand(self, command, box, requiresAnswer=True):
        """
        Send a command across the wire with the given C{amp.Box}.
        Mutate the given box to give it any additional keys (_command, _ask)
        required for the command and request/response machinery, then send it.
        If requiresAnswer is True, returns a C{Deferred} which fires when a
        response is received. The C{Deferred} is fired with an C{amp.Box} on
        success, or with an C{amp.RemoteAmpError} if an error is received.
        If the Deferred fails and the error is not handled by the caller of
        this method, the failure will be logged and the connection dropped.
        @param command: a str, the name of the command to issue.
        @param box: an AmpBox with the arguments for the command.
        @param requiresAnswer: a boolean.  Defaults to True.  If True, return a
        Deferred which will fire when the other side responds to this command.
        If False, return None and do not ask the other side for acknowledgement.
        @return: a Deferred which fires the AmpBox that holds the response to
        this command, or None, as specified by requiresAnswer.
        @raise ProtocolSwitched: if the protocol has been switched.
        """
        if self._failAllReason is not None:
            # The connection is already dead; fail without sending.
            return fail(self._failAllReason)
        box[COMMAND] = command
        tag = self._nextTag()
        if requiresAnswer:
            box[ASK] = tag
        box._sendTo(self.boxSender)
        if requiresAnswer:
            # Tracked until the matching _answer/_error box arrives.
            result = self._outstandingRequests[tag] = Deferred()
        else:
            result = None
        return result
    def callRemoteString(self, command, requiresAnswer=True, **kw):
        """
        This is a low-level API, designed only for optimizing simple messages
        for which the overhead of parsing is too great.
        @param command: a str naming the command.
        @param kw: arguments to the amp box.
        @param requiresAnswer: a boolean.  Defaults to True.  If True, return a
        Deferred which will fire when the other side responds to this command.
        If False, return None and do not ask the other side for acknowledgement.
        @return: a Deferred which fires the AmpBox that holds the response to
        this command, or None, as specified by requiresAnswer.
        """
        box = Box(kw)
        return self._sendBoxCommand(command, box, requiresAnswer)
    def callRemote(self, commandType, *a, **kw):
        """
        This is the primary high-level API for sending messages via AMP.  Invoke it
        with a command and appropriate arguments to send a message to this
        connection's peer.
        @param commandType: a subclass of Command.
        @type commandType: L{type}
        @param a: Positional (special) parameters taken by the command.
        Positional parameters will typically not be sent over the wire.  The
        only command included with AMP which uses positional parameters is
        L{ProtocolSwitchCommand}, which takes the protocol that will be
        switched to as its first argument.
        @param kw: Keyword arguments taken by the command.  These are the
        arguments declared in the command's 'arguments' attribute.  They will
        be encoded and sent to the peer as arguments for the L{commandType}.
        @return: If L{commandType} has a C{requiresAnswer} attribute set to
        L{False}, then return L{None}.  Otherwise, return a L{Deferred} which
        fires with a dictionary of objects representing the result of this
        call.  Additionally, this L{Deferred} may fail with an exception
        representing a connection failure, with L{UnknownRemoteError} if the
        other end of the connection fails for an unknown reason, or with any
        error specified as a key in L{commandType}'s C{errors} dictionary.
        """
        # XXX this takes command subclasses and not command objects on purpose.
        # There's really no reason to have all this back-and-forth between
        # command objects and the protocol, and the extra object being created
        # (the Command instance) is pointless.  Command is kind of like
        # Interface, and should be more like it.
        # In other words, the fact that commandType is instantiated here is an
        # implementation detail.  Don't rely on it.
        try:
            co = commandType(*a, **kw)
        except:
            # Signature errors etc. become a failed Deferred, not a raise.
            return fail()
        return co._doCommand(self)
    def unhandledError(self, failure):
        """
        This is a terminal callback called after application code has had a
        chance to quash any errors.
        """
        return self.boxSender.unhandledError(failure)
    def _answerReceived(self, box):
        """
        An AMP box was received that answered a command previously sent with
        L{callRemote}.
        @param box: an AmpBox with a value for its L{ANSWER} key.
        """
        question = self._outstandingRequests.pop(box[ANSWER])
        # Ensure errors escaping the caller's callbacks are still logged.
        question.addErrback(self.unhandledError)
        question.callback(box)
    def _errorReceived(self, box):
        """
        An AMP box was received that answered a command previously sent with
        L{callRemote}, with an error.
        @param box: an L{AmpBox} with a value for its L{ERROR}, L{ERROR_CODE},
        and L{ERROR_DESCRIPTION} keys.
        """
        question = self._outstandingRequests.pop(box[ERROR])
        question.addErrback(self.unhandledError)
        errorCode = box[ERROR_CODE]
        description = box[ERROR_DESCRIPTION]
        # Known protocol error codes map to specific exception classes;
        # everything else becomes a generic RemoteAmpError.
        if errorCode in PROTOCOL_ERRORS:
            exc = PROTOCOL_ERRORS[errorCode](errorCode, description)
        else:
            exc = RemoteAmpError(errorCode, description)
        question.errback(Failure(exc))
    def _commandReceived(self, box):
        """
        Dispatch an incoming command box and, if it asked for an answer,
        arrange to send back an answer or error box tagged with its _ask
        value.
        @param box: an L{AmpBox} with a value for its L{COMMAND} and L{ASK}
        keys.
        """
        def formatAnswer(answerBox):
            # Tag the response so the peer can match it to its request.
            answerBox[ANSWER] = box[ASK]
            return answerBox
        def formatError(error):
            if error.check(RemoteAmpError):
                code = error.value.errorCode
                desc = error.value.description
                # Fatal errors terminate the connection after being sent.
                if error.value.fatal:
                    errorBox = QuitBox()
                else:
                    errorBox = AmpBox()
            else:
                # Unrecognized failure: report it generically and drop the
                # connection.
                errorBox = QuitBox()
                log.err(error) # here is where server-side logging happens
                               # if the error isn't handled
                code = UNKNOWN_ERROR_CODE
                desc = "Unknown Error"
            errorBox[ERROR] = box[ASK]
            errorBox[ERROR_DESCRIPTION] = desc
            errorBox[ERROR_CODE] = code
            return errorBox
        deferred = self.dispatchCommand(box)
        if ASK in box:
            deferred.addCallbacks(formatAnswer, formatError)
            deferred.addCallback(self._safeEmit)
        deferred.addErrback(self.unhandledError)
    def ampBoxReceived(self, box):
        """
        An AmpBox was received, representing a command, or an answer to a
        previously issued command (either successful or erroneous).  Respond to
        it according to its contents.
        @param box: an AmpBox
        @raise NoEmptyBoxes: when a box is received that does not contain an
        '_answer', '_command' / '_ask', or '_error' key; i.e. one which does not
        fit into the command / response protocol defined by AMP.
        """
        if ANSWER in box:
            self._answerReceived(box)
        elif ERROR in box:
            self._errorReceived(box)
        elif COMMAND in box:
            self._commandReceived(box)
        else:
            raise NoEmptyBoxes(box)
    def _safeEmit(self, aBox):
        """
        Emit a box, ignoring L{ProtocolSwitched} and L{ConnectionLost} errors
        which cannot be usefully handled.
        """
        try:
            aBox._sendTo(self.boxSender)
        except (ProtocolSwitched, ConnectionLost):
            pass
    def dispatchCommand(self, box):
        """
        A box with a _command key was received.
        Dispatch it to a local handler call it.
        @param box: an AmpBox to be dispatched.
        @return: a L{Deferred} of the responder's result, or a failed
        L{Deferred} with L{UNHANDLED_ERROR_CODE} if no responder is found.
        """
        cmd = box[COMMAND]
        responder = self.locator.locateResponder(cmd)
        if responder is None:
            return fail(RemoteAmpError(
                UNHANDLED_ERROR_CODE,
                "Unhandled Command: %r" % (cmd,),
                False,
                local=Failure(UnhandledCommand())))
        return maybeDeferred(responder, box)
class CommandLocator:
    """
    A L{CommandLocator} is a collection of responders to AMP L{Command}s, with
    the help of the L{Command.responder} decorator.
    """
    class __metaclass__(type):
        """
        This metaclass keeps track of all of the Command.responder-decorated
        methods defined since the last CommandLocator subclass was defined.  It
        assumes (usually correctly, but unfortunately not necessarily so) that
        those commands responders were all declared as methods of the class
        being defined.  Note that this list can be incorrect if users use the
        Command.responder decorator outside the context of a CommandLocator
        class declaration.
        Command responders defined on subclasses are given precedence over
        those inherited from a base class.
        The Command.responder decorator explicitly cooperates with this
        metaclass.
        """
        # Accumulator shared with the Command.responder decorator; drained
        # each time a new CommandLocator subclass is created.
        _currentClassCommands = []
        def __new__(cls, name, bases, attrs):
            # Snapshot and reset the accumulator before creating the class.
            commands = cls._currentClassCommands[:]
            cls._currentClassCommands[:] = []
            cd = attrs['_commandDispatch'] = {}
            subcls = type.__new__(cls, name, bases, attrs)
            # Walk the MRO base-first so subclass responders override
            # inherited ones.
            ancestors = list(subcls.__mro__[1:])
            ancestors.reverse()
            for ancestor in ancestors:
                cd.update(getattr(ancestor, '_commandDispatch', {}))
            for commandClass, responderFunc in commands:
                cd[commandClass.commandName] = (commandClass, responderFunc)
            # Legacy support: if a subclass overrides the deprecated
            # lookupFunction, route locateResponder through it (with a
            # deprecation warning).
            if (bases and (
                    subcls.lookupFunction != CommandLocator.lookupFunction)):
                def locateResponder(self, name):
                    warnings.warn(
                        "Override locateResponder, not lookupFunction.",
                        category=PendingDeprecationWarning,
                        stacklevel=2)
                    return self.lookupFunction(name)
                subcls.locateResponder = locateResponder
            return subcls
    implements(IResponderLocator)
    def _wrapWithSerialization(self, aCallable, command):
        """
        Wrap aCallable with its command's argument de-serialization
        and result serialization logic.
        @param aCallable: a callable with a 'command' attribute, designed to be
        called with keyword arguments.
        @param command: the command class whose serialization to use.
        @return: a 1-arg callable which, when invoked with an AmpBox, will
        deserialize the argument list and invoke appropriate user code for the
        callable's command, returning a Deferred which fires with the result or
        fails with an error.
        """
        def doit(box):
            kw = command.parseArguments(box, self)
            def checkKnownErrors(error):
                # Translate declared application errors into RemoteAmpError
                # with the command's wire-level error code.
                key = error.trap(*command.allErrors)
                code = command.allErrors[key]
                desc = str(error.value)
                return Failure(RemoteAmpError(
                    code, desc, key in command.fatalErrors, local=error))
            def makeResponseFor(objects):
                try:
                    return command.makeResponse(objects, self)
                except:
                    # let's helpfully log this.
                    originalFailure = Failure()
                    raise BadLocalReturn(
                        "%r returned %r and %r could not serialize it" % (
                            aCallable,
                            objects,
                            command),
                        originalFailure)
            return maybeDeferred(aCallable, **kw).addCallback(
                makeResponseFor).addErrback(
                checkKnownErrors)
        return doit
    def lookupFunction(self, name):
        """
        Deprecated synonym for L{locateResponder}
        """
        # If a subclass overrode lookupFunction, fall back to the real
        # implementation here to avoid infinite recursion through the
        # metaclass-installed locateResponder shim.
        if self.__class__.lookupFunction != CommandLocator.lookupFunction:
            return CommandLocator.locateResponder(self, name)
        else:
            warnings.warn("Call locateResponder, not lookupFunction.",
                          category=PendingDeprecationWarning,
                          stacklevel=2)
            return self.locateResponder(name)
    def locateResponder(self, name):
        """
        Locate a callable to invoke when executing the named command.
        @param name: the normalized name (from the wire) of the command.
        @return: a 1-argument function that takes a Box and returns a box or a
        Deferred which fires a Box, for handling the command identified by the
        given name, or None, if no appropriate responder can be found.
        """
        # Try to find a high-level method to invoke, and if we can't find one,
        # fall back to a low-level one.
        cd = self._commandDispatch
        if name in cd:
            commandClass, responderFunc = cd[name]
            # Bind the stored plain function to this instance (py2 bound
            # method construction).
            responderMethod = types.MethodType(
                responderFunc, self, self.__class__)
            return self._wrapWithSerialization(responderMethod, commandClass)
class SimpleStringLocator(object):
    """
    An L{IResponderLocator} that dispatches by simple name mangling: the
    responder for command C{name} is the method C{amp_NAME} on this object.
    """
    implements(IResponderLocator)
    baseDispatchPrefix = 'amp_'
    def locateResponder(self, name):
        """
        Locate a callable to invoke when executing the named command.
        @param name: the normalized name (from the wire) of the command.
        @return: the method named C{"amp_" + name.upper()} on this object, or
        None if no such method exists.  The method will be called with the
        L{AmpBox} itself as its argument.
        """
        return getattr(self, self.baseDispatchPrefix + name.upper(), None)
PYTHON_KEYWORDS = [
'and', 'del', 'for', 'is', 'raise', 'assert', 'elif', 'from', 'lambda',
'return', 'break', 'else', 'global', 'not', 'try', 'class', 'except',
'if', 'or', 'while', 'continue', 'exec', 'import', 'pass', 'yield',
'def', 'finally', 'in', 'print']
def _wireNameToPythonIdentifier(key):
"""
(Private) Normalize an argument name from the wire for use with Python
code. If the return value is going to be a python keyword it will be
capitalized. If it contains any dashes they will be replaced with
underscores.
The rationale behind this method is that AMP should be an inherently
multi-language protocol, so message keys may contain all manner of bizarre
bytes. This is not a complete solution; there are still forms of arguments
that this implementation will be unable to parse. However, Python
identifiers share a huge raft of properties with identifiers from many
other languages, so this is a 'good enough' effort for now. We deal
explicitly with dashes because that is the most likely departure: Lisps
commonly use dashes to separate method names, so protocols initially
implemented in a lisp amp dialect may use dashes in argument or command
names.
@param key: a str, looking something like 'foo-bar-baz' or 'from'
@return: a str which is a valid python identifier, looking something like
'foo_bar_baz' or 'From'.
"""
lkey = key.replace("-", "_")
if lkey in PYTHON_KEYWORDS:
return lkey.title()
return lkey
class Argument:
    """
    Base class for objects which move values between AMP boxes and Python
    function arguments.
    This L{IArgumentType} implementation offers higher-level hooks for
    subclasses: override L{fromString} and L{toString} (or the *Proto
    variants) rather than L{fromBox}/L{toBox} directly.
    """
    implements(IArgumentType)
    optional = False
    def __init__(self, optional=False):
        """
        Create an Argument.
        @param optional: a boolean indicating whether this argument may be
        omitted in the protocol.
        """
        self.optional = optional
    def retrieve(self, d, name, proto):
        """
        Remove and return C{d[name]}.
        @param d: a dictionary.
        @param name: a key in C{d}.
        @param proto: an instance of an AMP.
        @raise KeyError: if this argument is not optional and no value was
        found.
        @return: the value formerly at C{d[name]}, or None for a missing
        optional argument.
        """
        if not self.optional:
            # Required argument: a missing key is an error (KeyError).
            return d.pop(name)
        value = d.get(name)
        if value is not None:
            del d[name]
        return value
    def fromBox(self, name, strings, objects, proto):
        """
        Populate an 'out' dictionary mapping names to Python values decoded
        from an 'in' AmpBox mapping strings to string values.
        @param name: the argument name to retrieve
        @type name: str
        @param strings: The AmpBox to read string(s) from, a mapping of
        argument names to string values.
        @type strings: AmpBox
        @param objects: The dictionary to write object(s) to, a mapping of
        names to Python objects.
        @type objects: dict
        @param proto: an AMP instance.
        """
        wireValue = self.retrieve(strings, name, proto)
        pythonKey = _wireNameToPythonIdentifier(name)
        if wireValue is None and self.optional:
            # Absent optional argument decodes to None.
            objects[pythonKey] = None
        else:
            objects[pythonKey] = self.fromStringProto(wireValue, proto)
    def toBox(self, name, strings, objects, proto):
        """
        Populate an 'out' AmpBox with strings encoded from an 'in' dictionary
        mapping names to Python values.
        @param name: the argument name to retrieve
        @type name: str
        @param strings: The AmpBox to write string(s) to, a mapping of
        argument names to string values.
        @type strings: AmpBox
        @param objects: The dictionary to read object(s) from, a mapping of
        names to Python objects.
        @type objects: dict
        @param proto: the protocol we are converting for.
        @type proto: AMP
        """
        obj = self.retrieve(objects, _wireNameToPythonIdentifier(name), proto)
        if obj is None and self.optional:
            # Optional arguments that are None are simply omitted.
            return
        strings[name] = self.toStringProto(obj, proto)
    def fromStringProto(self, inString, proto):
        """
        Convert a string to a Python value, with access to the protocol.
        The default implementation ignores the protocol and delegates to
        L{fromString}.
        @param inString: the string to convert.
        @param proto: the protocol we are converting for.
        @type proto: AMP
        @return: a Python object.
        """
        return self.fromString(inString)
    def toStringProto(self, inObject, proto):
        """
        Convert a Python object to a string, with access to the protocol.
        The default implementation ignores the protocol and delegates to
        L{toString}.
        @param inObject: the object to convert.
        @param proto: the protocol we are converting for.
        @type proto: AMP
        """
        return self.toString(inObject)
    def fromString(self, inString):
        """
        Convert a string to a Python object.  Subclasses must implement this.
        @param inString: the string to convert.
        @type inString: str
        @return: the decoded value from inString
        """
    def toString(self, inObject):
        """
        Convert a Python object into a string for passing over the network.
        Subclasses must implement this.
        @param inObject: an object of the type this Argument deals with.
        @return: the wire encoding of inObject
        @rtype: str
        """
class Integer(Argument):
    """
    Transmit integers of any magnitude as their decimal string form.
    Example: C{123} <-> C{"123"}.
    """
    def fromString(self, inString):
        return int(inString)
    def toString(self, inObject):
        return str(int(inObject))
class String(Argument):
    """
    Pass byte strings through unmodified in both directions.
    """
    def fromString(self, inString):
        return inString
    def toString(self, inObject):
        return inObject
class Float(Argument):
    """
    Transmit floating-point values as their repr, which round-trips
    exactly.
    """
    def fromString(self, inString):
        return float(inString)
    def toString(self, inObject):
        return repr(inObject)
class Boolean(Argument):
    """
    Transmit booleans as the literal strings "True" and "False".
    """
    def fromString(self, inString):
        if inString == 'True':
            return True
        if inString == 'False':
            return False
        # Anything else is a protocol violation.
        raise TypeError("Bad boolean value: %r" % (inString,))
    def toString(self, inObject):
        # Any truthy value encodes as "True".
        return 'True' if inObject else 'False'
class Unicode(String):
    """
    Transmit unicode strings encoded as UTF-8 on the wire.
    """
    def fromString(self, inString):
        return String.fromString(self, inString).decode('utf-8')
    def toString(self, inObject):
        return String.toString(self, inObject.encode('utf-8'))
class Path(Unicode):
    """
    Encode and decode L{filepath.FilePath} instances as (unicode) paths on
    the wire.
    Really intended for subprocess communication tools: exchanging pathnames
    between different machines is not generally meaningful, but neither is it
    disallowed -- NFS paths, for example, can be communicated this way.
    """
    def fromString(self, inString):
        return filepath.FilePath(Unicode.fromString(self, inString))
    def toString(self, inObject):
        return Unicode.toString(self, inObject.path)
class ListOf(Argument):
    """
    Encode and decode lists whose elements are all of a single other
    argument type, e.g. C{ListOf(Integer())} for C{[3, 7, 9, 15]}.
    On the wire, each element is a 16-bit length-prefixed string produced by
    the element type; the serialized list as a whole is still subject to
    L{MAX_VALUE_LENGTH}.
    @ivar elementType: The L{Argument} used to encode and decode elements.
    Note: it must be implemented via C{fromString}/C{toString}, not
    C{fromBox}/C{toBox}.
    @param optional: a boolean indicating whether this argument may be
    omitted in the protocol.
    @since: 10.0
    """
    def __init__(self, elementType, optional=False):
        Argument.__init__(self, optional)
        self.elementType = elementType
    def fromString(self, inString):
        """
        Split the wire string back into length-prefixed chunks and decode
        each one with the element type.
        """
        chunks = []
        receiver = Int16StringReceiver()
        # Reuse the framing parser: each complete string is collected.
        receiver.stringReceived = chunks.append
        receiver.dataReceived(inString)
        return [self.elementType.fromString(chunk) for chunk in chunks]
    def toString(self, inObject):
        """
        Serialize each element and prefix it with its 16-bit length.
        """
        pieces = []
        for element in inObject:
            encoded = self.elementType.toString(element)
            pieces.append(pack('!H', len(encoded)))
            pieces.append(encoded)
        return ''.join(pieces)
class AmpList(Argument):
    """
    Convert a list of dictionaries into a concatenation of AMP boxes on the
    wire.  For example, to pass::
        [{'a': 7, 'b': u'hello'}, {'a': 9, 'b': u'goodbye'}]
    declare::
        AmpList([('a', Integer()),
                 ('b', Unicode())])
    """
    def __init__(self, subargs, optional=False):
        """
        Create an AmpList.
        @param subargs: a list of 2-tuples of ('name', argument) describing
        the schema of the dictionaries in the sequence of amp boxes.
        @param optional: a boolean indicating whether this argument may be
        omitted in the protocol.
        """
        Argument.__init__(self, optional)
        self.subargs = subargs
    def fromStringProto(self, inString, proto):
        # Each serialized box becomes one dictionary, decoded per subargs.
        return [_stringsToObjects(box, self.subargs, proto)
                for box in parseString(inString)]
    def toStringProto(self, inObject, proto):
        serialized = []
        for objects in inObject:
            box = _objectsToStrings(objects, self.subargs, Box(), proto)
            serialized.append(box.serialize())
        return ''.join(serialized)
class Command:
    """
    Subclass me to specify an AMP Command.

    @cvar arguments: A list of 2-tuples of (name, Argument-subclass-instance),
    specifying the names and values of the parameters which are required for
    this command.

    @cvar response: A list like L{arguments}, but instead used for the return
    value.

    @cvar errors: A mapping of subclasses of L{Exception} to wire-protocol tags
    for errors represented as L{str}s.  Responders which raise keys from this
    dictionary will have the error translated to the corresponding tag on the
    wire.  Invokers which receive Deferreds from invoking this command with
    L{AMP.callRemote} will potentially receive Failures with keys from this
    mapping as their value.  This mapping is inherited; if you declare a
    command which handles C{FooError} as 'FOO_ERROR', then subclass it and
    specify C{BarError} as 'BAR_ERROR', responders to the subclass may raise
    either C{FooError} or C{BarError}, and invokers must be able to deal with
    either of those exceptions.

    @cvar fatalErrors: like 'errors', but errors in this list will always
    terminate the connection, despite being of a recognizable error type.

    @cvar commandType: The type of Box used to issue commands; useful only for
    protocol-modifying behavior like startTLS or protocol switching.  Defaults
    to a plain vanilla L{Box}.

    @cvar responseType: The type of Box used to respond to this command; only
    useful for protocol-modifying behavior like startTLS or protocol switching.
    Defaults to a plain vanilla L{Box}.

    @ivar requiresAnswer: a boolean; defaults to True.  Set it to False on your
    subclass if you want callRemote to return None.  Note: this is a hint only
    to the client side of the protocol.  The return-type of a command responder
    method must always be a dictionary adhering to the contract specified by
    L{response}, because clients are always free to request a response if they
    want one.
    """

    class __metaclass__(type):
        """
        Metaclass hack to establish reverse-mappings for 'errors' and
        'fatalErrors' as class vars.
        """
        # NOTE: this is Python 2 only -- '__metaclass__' and 'iteritems'
        # below have no effect / no equivalent on Python 3.
        def __new__(cls, name, bases, attrs):
            # 'reverseErrors' maps wire tag -> exception class;
            # 'allErrors' maps exception class -> wire tag (fatal and not).
            reverseErrors = attrs['reverseErrors'] = {}
            er = attrs['allErrors'] = {}
            # A command which does not declare an explicit wire name is
            # named after its class.
            if 'commandName' not in attrs:
                attrs['commandName'] = name
            newtype = type.__new__(cls, name, bases, attrs)
            errors = {}
            fatalErrors = {}
            # Walk the full inheritance hierarchy so the error mappings
            # documented on the class docstring are inherited.
            accumulateClassDict(newtype, 'errors', errors)
            accumulateClassDict(newtype, 'fatalErrors', fatalErrors)
            for v, k in errors.iteritems():
                reverseErrors[k] = v
                er[v] = k
            for v, k in fatalErrors.iteritems():
                reverseErrors[k] = v
                er[v] = k
            return newtype

    arguments = []
    response = []
    extra = []
    errors = {}
    fatalErrors = {}

    commandType = Box
    responseType = Box

    requiresAnswer = True

    def __init__(self, **kw):
        """
        Create an instance of this command with specified values for its
        parameters.

        @param kw: a dict containing an appropriate value for each name
        specified in the L{arguments} attribute of my class.

        @raise InvalidSignature: if you forgot any required arguments.
        """
        self.structured = kw
        givenArgs = kw.keys()
        forgotten = []
        for name, arg in self.arguments:
            # Arguments are declared by wire name; keyword arguments use
            # the python-identifier equivalent of that name.
            pythonName = _wireNameToPythonIdentifier(name)
            if pythonName not in givenArgs and not arg.optional:
                forgotten.append(pythonName)
        if forgotten:
            raise InvalidSignature("forgot %s for %s" % (
                    ', '.join(forgotten), self.commandName))
        # NOTE(review): dead store -- 'forgotten' is never read after this.
        forgotten = []

    def makeResponse(cls, objects, proto):
        """
        Serialize a mapping of arguments using this L{Command}'s
        response schema.

        @param objects: a dict with keys matching the names specified in
        self.response, having values of the types that the Argument objects in
        self.response can format.

        @param proto: an L{AMP}.

        @return: an L{AmpBox}.
        """
        try:
            responseType = cls.responseType()
        except:
            # NOTE(review): bare except -- any failure to instantiate the
            # response box is converted into a failing Deferred rather
            # than propagating synchronously.
            return fail()
        return _objectsToStrings(objects, cls.response, responseType, proto)
    makeResponse = classmethod(makeResponse)

    def makeArguments(cls, objects, proto):
        """
        Serialize a mapping of arguments using this L{Command}'s
        argument schema.

        @param objects: a dict with keys similar to the names specified in
        self.arguments, having values of the types that the Argument objects in
        self.arguments can parse.

        @param proto: an L{AMP}.

        @return: An instance of this L{Command}'s C{commandType}.
        """
        # Reject any keys which do not correspond to a declared argument,
        # comparing by the python-identifier form of each wire name.
        allowedNames = set()
        for (argName, ignored) in cls.arguments:
            allowedNames.add(_wireNameToPythonIdentifier(argName))

        for intendedArg in objects:
            if intendedArg not in allowedNames:
                raise InvalidSignature(
                    "%s is not a valid argument" % (intendedArg,))
        return _objectsToStrings(objects, cls.arguments, cls.commandType(),
                                 proto)
    makeArguments = classmethod(makeArguments)

    def parseResponse(cls, box, protocol):
        """
        Parse a mapping of serialized arguments using this
        L{Command}'s response schema.

        @param box: A mapping of response-argument names to the
        serialized forms of those arguments.
        @param protocol: The L{AMP} protocol.

        @return: A mapping of response-argument names to the parsed
        forms.
        """
        return _stringsToObjects(box, cls.response, protocol)
    parseResponse = classmethod(parseResponse)

    def parseArguments(cls, box, protocol):
        """
        Parse a mapping of serialized arguments using this
        L{Command}'s argument schema.

        @param box: A mapping of argument names to the seralized forms
        of those arguments.
        @param protocol: The L{AMP} protocol.

        @return: A mapping of argument names to the parsed forms.
        """
        return _stringsToObjects(box, cls.arguments, protocol)
    parseArguments = classmethod(parseArguments)

    def responder(cls, methodfunc):
        """
        Declare a method to be a responder for a particular command.

        This is a decorator.

        Use like so::

            class MyCommand(Command):
                arguments = [('a', ...), ('b', ...)]

            class MyProto(AMP):
                def myFunMethod(self, a, b):
                    ...
                MyCommand.responder(myFunMethod)

        Notes: Although decorator syntax is not used within Twisted, this
        function returns its argument and is therefore safe to use with
        decorator syntax.

        This is not thread safe.  Don't declare AMP subclasses in other
        threads.  Don't declare responders outside the scope of AMP subclasses;
        the behavior is undefined.

        @param methodfunc: A function which will later become a method, which
        has a keyword signature compatible with this command's L{argument} list
        and returns a dictionary with a set of keys compatible with this
        command's L{response} list.

        @return: the methodfunc parameter.
        """
        # The CommandLocator metaclass drains this list when the enclosing
        # class statement finishes -- hence the thread-safety caveat above.
        CommandLocator._currentClassCommands.append((cls, methodfunc))
        return methodfunc
    responder = classmethod(responder)

    # Our only instance method
    def _doCommand(self, proto):
        """
        Encode and send this Command to the given protocol.

        @param proto: an AMP, representing the connection to send to.

        @return: a Deferred which will fire or error appropriately when the
        other side responds to the command (or error if the connection is lost
        before it is responded to).
        """
        def _massageError(error):
            # Translate a wire-level RemoteAmpError back into the specific
            # exception class declared in this command's 'errors' mapping.
            error.trap(RemoteAmpError)
            rje = error.value
            errorType = self.reverseErrors.get(rje.errorCode,
                                               UnknownRemoteError)
            return Failure(errorType(rje.description))

        d = proto._sendBoxCommand(self.commandName,
                                  self.makeArguments(self.structured, proto),
                                  self.requiresAnswer)

        if self.requiresAnswer:
            d.addCallback(self.parseResponse, proto)
            d.addErrback(_massageError)

        return d
class _NoCertificate:
    """
    Stand-in used by peers which do not want a local certificate.

    AMP's internal language is all about certificates, so this duck-types in
    the appropriate place; the API isn't really stable and is not exposed
    anywhere public.

    Clients get ephemeral DH keys (or whatever OpenSSL's default is for
    certificate-less clients); servers get a freshly generated, self-signed
    throwaway certificate, because OpenSSL will not let both ends of a
    connection be anonymous.
    """
    def __init__(self, client):
        """
        Create a _NoCertificate for one side of the connection.

        @param client: True if we are a client and should truly have no
        certificate and be anonymous, False if we are a server and actually
        have to generate a temporary certificate.

        @type client: bool
        """
        self.client = client

    def options(self, *authorities):
        """
        Behaves like L{twisted.internet.ssl.PrivateCertificate.options}().
        """
        if self.client:
            settings = {}
            if authorities:
                settings.update(dict(
                    verify=True,
                    requireCertificate=True,
                    caCerts=[auth.original for auth in authorities]))
            return CertificateOptions(**settings)
        # Server side: do some crud with sslverify to generate a temporary
        # self-signed certificate with garbage values in the DN.  This is
        # SLOOOWWWWW so it is only in the absolute worst, most naive case.
        # We have to do this because OpenSSL will not let both the server
        # and client be anonymous.
        temporaryDN = DN(CN='TEMPORARY CERTIFICATE')
        keyPair = KeyPair.generate()
        request = keyPair.certificateRequest(temporaryDN)
        signedData = keyPair.signCertificateRequest(
            temporaryDN, request, lambda dn: True, 1)
        temporaryCert = keyPair.newCertificate(signedData)
        return temporaryCert.options(*authorities)
class _TLSBox(AmpBox):
    """
    An L{AmpBox} which, once fully transmitted, switches the connection over
    to TLS.
    """
    __slots__ = []

    def __init__(self):
        # Refuse to even construct the box if SSL support is unavailable.
        if ssl is None:
            raise RemoteAmpError("TLS_ERROR", "TLS not available")
        AmpBox.__init__(self)

    def _keyprop(k, default):
        # Expose a box key as a read-only attribute with a fallback value.
        return property(lambda box: box.get(k, default))

    # These properties are described in startTLS
    certificate = _keyprop('tls_localCertificate', _NoCertificate(False))
    verify = _keyprop('tls_verifyAuthorities', None)

    def _sendTo(self, proto):
        """
        Write my encoded form to the protocol -- minus the local-only TLS
        keys, which must never go over the wire -- then begin TLS.
        """
        wireBox = AmpBox(self)
        wireBox.pop('tls_localCertificate', None)
        wireBox.pop('tls_verifyAuthorities', None)
        wireBox._sendTo(proto)
        proto._startTLS(self.certificate, self.verify)
class _LocalArgument(String):
    """
    Local arguments are never actually relayed across the wire.  This is just a
    shim so that StartTLS can pretend to have some arguments: if arguments
    acquire documentation properties, replace this with something nicer later.
    """
    def fromBox(self, name, strings, objects, proto):
        # Intentionally a no-op: nothing was sent, so nothing is parsed.
        pass
class StartTLS(Command):
    """
    Use, or subclass, me to implement a command that starts TLS.

    Callers of StartTLS may pass several special arguments, which affect the
    TLS negotiation:

        - tls_localCertificate: This is a
          twisted.internet.ssl.PrivateCertificate which will be used to secure
          the side of the connection it is returned on.

        - tls_verifyAuthorities: This is a list of
          twisted.internet.ssl.Certificate objects that will be used as the
          certificate authorities to verify our peer's certificate.

    Each of those special parameters may also be present as a key in the
    response dictionary.
    """

    arguments = [("tls_localCertificate", _LocalArgument(optional=True)),
                 ("tls_verifyAuthorities", _LocalArgument(optional=True))]

    response = [("tls_localCertificate", _LocalArgument(optional=True)),
                ("tls_verifyAuthorities", _LocalArgument(optional=True))]

    responseType = _TLSBox

    def __init__(self, **kw):
        """
        Create a StartTLS command.  (This is private.  Use AMP.callRemote.)

        @param tls_localCertificate: the PrivateCertificate object to use to
            secure the connection.  If it's None, or unspecified, an ephemeral
            DH key is used instead.

        @param tls_verifyAuthorities: a list of Certificate objects which
            represent root certificates to verify our peer with.
        """
        if ssl is None:
            raise RuntimeError("TLS not available.")
        # Pull the TLS-only keyword parameters out before Command validates
        # the rest against the argument schema.
        self.certificate = kw.pop('tls_localCertificate', _NoCertificate(True))
        self.authorities = kw.pop('tls_verifyAuthorities', None)
        Command.__init__(self, **kw)

    def _doCommand(self, proto):
        """
        Send the StartTLS request, but defer the actual handshake until the
        peer acknowledges it.
        """
        def beginHandshake(response):
            # XXX before we get back to user code we are going to start TLS...
            proto._startTLS(self.certificate, self.authorities)
            return response
        answer = Command._doCommand(self, proto)
        proto._prepareTLS(self.certificate, self.authorities)
        answer.addCallback(beginHandshake)
        return answer
class ProtocolSwitchCommand(Command):
    """
    Use this command to switch from something Amp-derived to a different
    protocol mid-connection.  This can be useful to use amp as the
    connection-startup negotiation phase.  Since TLS is a different layer
    entirely, you can use Amp to negotiate the security parameters of your
    connection, then switch to a different protocol, and the connection will
    remain secured.
    """

    def __init__(self, _protoToSwitchToFactory, **kw):
        """
        Create a ProtocolSwitchCommand.

        @param _protoToSwitchToFactory: a ProtocolFactory which will generate
        the Protocol to switch to.

        @param kw: Keyword arguments, encoded and handled normally as
        L{Command} would.
        """
        self.protoToSwitchToFactory = _protoToSwitchToFactory
        super(ProtocolSwitchCommand, self).__init__(**kw)

    def makeResponse(cls, innerProto, proto):
        return _SwitchBox(innerProto)
    makeResponse = classmethod(makeResponse)

    def _doCommand(self, proto):
        """
        Lock the protocol when this command is issued, then either complete
        the switch (on acknowledgement) or unlock and notify the factory of
        failure (on error).
        """
        def switchSucceeded(result):
            peer = proto.transport.getPeer()
            nestedProto = self.protoToSwitchToFactory.buildProtocol(peer)
            proto._switchTo(nestedProto, self.protoToSwitchToFactory)
            return result

        def switchFailed(failure):
            proto._unlockFromSwitch()
            self.protoToSwitchToFactory.clientConnectionFailed(
                None, Failure(CONNECTION_LOST))
            return failure

        answer = super(ProtocolSwitchCommand, self)._doCommand(proto)
        proto._lockForSwitch()
        return answer.addCallbacks(switchSucceeded, switchFailed)
class BinaryBoxProtocol(StatefulStringProtocol, Int16StringReceiver):
    """
    A protocol for receving L{Box}es - key/value pairs - via length-prefixed
    strings.  A box is composed of:

        - any number of key-value pairs, described by:
            - a 2-byte network-endian packed key length (of which the first
              byte must be null, and the second must be non-null: i.e. the
              value of the length must be 1-255)
            - a key, comprised of that many bytes
            - a 2-byte network-endian unsigned value length (up to the maximum
              of 65535)
            - a value, comprised of that many bytes
        - 2 null bytes

    In other words, an even number of strings prefixed with packed unsigned
    16-bit integers, and then a 0-length string to indicate the end of the box.

    This protocol also implements 2 extra private bits of functionality related
    to the byte boundaries between messages; it can start TLS between two given
    boxes or switch to an entirely different protocol.  However, due to some
    tricky elements of the implementation, the public interface to this
    functionality is L{ProtocolSwitchCommand} and L{StartTLS}.

    @ivar _keyLengthLimitExceeded: A flag which is only true when the
    connection is being closed because a key length prefix which was longer
    than allowed by the protocol was received.

    @ivar boxReceiver: an L{IBoxReceiver} provider, whose L{ampBoxReceived}
    method will be invoked for each L{Box} that is received.
    """

    implements(IBoxSender)

    # True only in the window between starting TLS and receiving the first
    # post-handshake bytes; used by connectionLost to detect a rejected cert.
    _justStartedTLS = False
    # While not None, outgoing boxes are buffered here pending the TLS
    # handshake instead of being written to the transport.
    _startingTLSBuffer = None
    # True once a protocol switch has been requested; no more AMP traffic.
    _locked = False
    # Parser state: the key awaiting its value, and the box being built.
    _currentKey = None
    _currentBox = None

    _keyLengthLimitExceeded = False

    hostCertificate = None
    noPeerCertificate = False   # for tests
    innerProtocol = None
    innerProtocolClientFactory = None

    def __init__(self, boxReceiver):
        self.boxReceiver = boxReceiver

    def _switchTo(self, newProto, clientFactory=None):
        """
        Switch this BinaryBoxProtocol's transport to a new protocol.  You need
        to do this 'simultaneously' on both ends of a connection; the easiest
        way to do this is to use a subclass of ProtocolSwitchCommand.

        @param newProto: the new protocol instance to switch to.

        @param clientFactory: the ClientFactory to send the
        L{clientConnectionLost} notification to.
        """
        # All the data that Int16Receiver has not yet dealt with belongs to our
        # new protocol: luckily it's keeping that in a handy (although
        # ostensibly internal) variable for us:
        newProtoData = self.recvd
        # We're quite possibly in the middle of a 'dataReceived' loop in
        # Int16StringReceiver: let's make sure that the next iteration, the
        # loop will break and not attempt to look at something that isn't a
        # length prefix.
        self.recvd = ''
        # Finally, do the actual work of setting up the protocol and delivering
        # its first chunk of data, if one is available.
        self.innerProtocol = newProto
        self.innerProtocolClientFactory = clientFactory
        newProto.makeConnection(self.transport)
        if newProtoData:
            newProto.dataReceived(newProtoData)

    def sendBox(self, box):
        """
        Send a amp.Box to my peer.

        Note: transport.write is never called outside of this method.

        @param box: an AmpBox.

        @raise ProtocolSwitched: if the protocol has previously been switched.

        @raise ConnectionLost: if the connection has previously been lost.
        """
        if self._locked:
            raise ProtocolSwitched(
                "This connection has switched: no AMP traffic allowed.")
        if self.transport is None:
            raise ConnectionLost()
        if self._startingTLSBuffer is not None:
            # Hold the box until the TLS handshake completes; _startTLS will
            # flush the buffer.
            self._startingTLSBuffer.append(box)
        else:
            self.transport.write(box.serialize())

    def makeConnection(self, transport):
        """
        Notify L{boxReceiver} that it is about to receive boxes from this
        protocol by invoking L{startReceivingBoxes}.
        """
        self.transport = transport
        self.boxReceiver.startReceivingBoxes(self)
        self.connectionMade()

    def dataReceived(self, data):
        """
        Either parse incoming data as L{AmpBox}es or relay it to our nested
        protocol.
        """
        if self._justStartedTLS:
            # First bytes after the handshake: the peer accepted our cert.
            self._justStartedTLS = False
        # If we already have an inner protocol, then we don't deliver data to
        # the protocol parser any more; we just hand it off.
        if self.innerProtocol is not None:
            self.innerProtocol.dataReceived(data)
            return
        return Int16StringReceiver.dataReceived(self, data)

    def connectionLost(self, reason):
        """
        The connection was lost; notify any nested protocol.
        """
        if self.innerProtocol is not None:
            self.innerProtocol.connectionLost(reason)
            if self.innerProtocolClientFactory is not None:
                self.innerProtocolClientFactory.clientConnectionLost(None, reason)
        # Pick the most informative failure to report to the box receiver.
        if self._keyLengthLimitExceeded:
            failReason = Failure(TooLong(True, False, None, None))
        elif reason.check(ConnectionClosed) and self._justStartedTLS:
            # We just started TLS and haven't received any data.  This means
            # the other connection didn't like our cert (although they may not
            # have told us why - later Twisted should make 'reason' into a TLS
            # error.)
            failReason = PeerVerifyError(
                "Peer rejected our certificate for an unknown reason.")
        else:
            failReason = reason
        self.boxReceiver.stopReceivingBoxes(failReason)

    # The longest key allowed
    _MAX_KEY_LENGTH = 255

    # The longest value allowed (this is somewhat redundant, as longer values
    # cannot be encoded - ah well).
    _MAX_VALUE_LENGTH = 65535

    # The first thing received is a key.
    MAX_LENGTH = _MAX_KEY_LENGTH

    def proto_init(self, string):
        """
        String received in the 'init' state.
        """
        self._currentBox = AmpBox()
        return self.proto_key(string)

    def proto_key(self, string):
        """
        String received in the 'key' state.  If the key is empty, a complete
        box has been received.
        """
        if string:
            self._currentKey = string
            # Values may be longer than keys; raise the length ceiling.
            self.MAX_LENGTH = self._MAX_VALUE_LENGTH
            return 'value'
        else:
            self.boxReceiver.ampBoxReceived(self._currentBox)
            self._currentBox = None
            return 'init'

    def proto_value(self, string):
        """
        String received in the 'value' state.
        """
        self._currentBox[self._currentKey] = string
        self._currentKey = None
        # Back to expecting a key; restore the stricter limit.
        self.MAX_LENGTH = self._MAX_KEY_LENGTH
        return 'key'

    def lengthLimitExceeded(self, length):
        """
        The key length limit was exceeded.  Disconnect the transport and make
        sure a meaningful exception is reported.
        """
        self._keyLengthLimitExceeded = True
        self.transport.loseConnection()

    def _lockForSwitch(self):
        """
        Lock this binary protocol so that no further boxes may be sent.  This
        is used when sending a request to switch underlying protocols.  You
        probably want to subclass ProtocolSwitchCommand rather than calling
        this directly.
        """
        self._locked = True

    def _unlockFromSwitch(self):
        """
        Unlock this locked binary protocol so that further boxes may be sent
        again.  This is used after an attempt to switch protocols has failed
        for some reason.
        """
        if self.innerProtocol is not None:
            raise ProtocolSwitched("Protocol already switched. Cannot unlock.")
        self._locked = False

    def _prepareTLS(self, certificate, verifyAuthorities):
        """
        Used by StartTLSCommand to put us into the state where we don't
        actually send things that get sent, instead we buffer them.  see
        L{_sendBox}.
        """
        self._startingTLSBuffer = []
        if self.hostCertificate is not None:
            raise OnlyOneTLS(
                "Previously authenticated connection between %s and %s "
                "is trying to re-establish as %s" % (
                    self.hostCertificate,
                    self.peerCertificate,
                    (certificate, verifyAuthorities)))

    def _startTLS(self, certificate, verifyAuthorities):
        """
        Used by TLSBox to initiate the SSL handshake.

        @param certificate: a L{twisted.internet.ssl.PrivateCertificate} for
        use locally.

        @param verifyAuthorities: L{twisted.internet.ssl.Certificate} instances
        representing certificate authorities which will verify our peer.
        """
        self.hostCertificate = certificate
        self._justStartedTLS = True
        if verifyAuthorities is None:
            verifyAuthorities = ()
        self.transport.startTLS(certificate.options(*verifyAuthorities))
        # Flush any boxes that were queued while the handshake was pending.
        stlsb = self._startingTLSBuffer
        if stlsb is not None:
            self._startingTLSBuffer = None
            for box in stlsb:
                self.sendBox(box)

    def _getPeerCertificate(self):
        if self.noPeerCertificate:
            return None
        return Certificate.peerFromTransport(self.transport)
    peerCertificate = property(_getPeerCertificate)

    def unhandledError(self, failure):
        """
        The buck stops here.  This error was completely unhandled, time to
        terminate the connection.
        """
        log.msg("Amp server or network failure "
                "unhandled by client application:")
        log.err(failure)
        log.msg(
            "Dropping connection! "
            "To avoid, add errbacks to ALL remote commands!")
        if self.transport is not None:
            self.transport.loseConnection()
def _defaultStartTLSResponder(self):
    """
    The default TLS responder doesn't specify any certificate or anything.

    From a security perspective, it's little better than a plain-text
    connection - but it is still a *bit* better, so it's included for
    convenience.

    You probably want to override this by providing your own
    StartTLS.responder.
    """
    return {}
# Install the fallback responder so every AMP instance can at least
# acknowledge a StartTLS request.
StartTLS.responder(_defaultStartTLSResponder)
class AMP(BinaryBoxProtocol, BoxDispatcher,
          CommandLocator, SimpleStringLocator):
    """
    This protocol is an AMP connection.  See the module docstring for protocol
    details.
    """

    # False means a legacy subclass skipped our constructor; makeConnection
    # initializes lazily in that case.
    _ampInitialized = False

    def __init__(self, boxReceiver=None, locator=None):
        # For backwards compatibility.  When AMP did not separate parsing logic
        # (L{BinaryBoxProtocol}), request-response logic (L{BoxDispatcher}) and
        # command routing (L{CommandLocator}), it did not have a constructor.
        # Now it does, so old subclasses might have defined their own that did
        # not upcall.  If this flag isn't set, we'll call the constructor in
        # makeConnection before anything actually happens.
        self._ampInitialized = True
        if boxReceiver is None:
            boxReceiver = self
        if locator is None:
            locator = self
        BoxDispatcher.__init__(self, locator)
        BinaryBoxProtocol.__init__(self, boxReceiver)

    def locateResponder(self, name):
        """
        Unify the implementations of L{CommandLocator} and
        L{SimpleStringLocator} to perform both kinds of dispatch, preferring
        L{CommandLocator}.
        """
        firstResponder = CommandLocator.locateResponder(self, name)
        if firstResponder is not None:
            return firstResponder
        secondResponder = SimpleStringLocator.locateResponder(self, name)
        return secondResponder

    def __repr__(self):
        """
        A verbose string representation which gives us information about this
        AMP connection.
        """
        if self.innerProtocol is not None:
            innerRepr = ' inner %r' % (self.innerProtocol,)
        else:
            innerRepr = ''
        return '<%s%s at 0x%x>' % (
            self.__class__.__name__, innerRepr, unsignedID(self))

    def makeConnection(self, transport):
        """
        Emit a helpful log message when the connection is made.
        """
        if not self._ampInitialized:
            # See comment in the constructor re: backward compatibility.  I
            # should probably emit a deprecation warning here.
            AMP.__init__(self)
        # Save these so we can emit a similar log message in L{connectionLost}.
        self._transportPeer = transport.getPeer()
        self._transportHost = transport.getHost()
        log.msg("%s connection established (HOST:%s PEER:%s)" % (
                self.__class__.__name__,
                self._transportHost,
                self._transportPeer))
        BinaryBoxProtocol.makeConnection(self, transport)

    def connectionLost(self, reason):
        """
        Emit a helpful log message when the connection is lost.
        """
        log.msg("%s connection lost (HOST:%s PEER:%s)" %
                (self.__class__.__name__,
                 self._transportHost,
                 self._transportPeer))
        BinaryBoxProtocol.connectionLost(self, reason)
        # Drop the transport reference so further sends raise ConnectionLost
        # instead of writing to a dead transport.
        self.transport = None
class _ParserHelper:
    """
    A box receiver which records all boxes received.
    """
    def __init__(self):
        self.boxes = []

    # _ParserHelper doubles as a fake transport below, so it implements just
    # enough of the transport interface for makeConnection() to succeed.
    def getPeer(self):
        return 'string'

    def getHost(self):
        return 'string'

    disconnecting = False

    def startReceivingBoxes(self, sender):
        """
        No initialization is required.
        """

    def ampBoxReceived(self, box):
        self.boxes.append(box)

    # Synchronous helpers
    def parse(cls, fileObj):
        """
        Parse some amp data stored in a file.

        @param fileObj: a file-like object.

        @return: a list of AmpBoxes encoded in the given file.
        """
        parserHelper = cls()
        bbp = BinaryBoxProtocol(boxReceiver=parserHelper)
        # The helper itself stands in for the transport; nothing is ever
        # written back while parsing.
        bbp.makeConnection(parserHelper)
        bbp.dataReceived(fileObj.read())
        return parserHelper.boxes
    parse = classmethod(parse)

    def parseString(cls, data):
        """
        Parse some amp data stored in a string.

        @param data: a str holding some amp-encoded data.

        @return: a list of AmpBoxes encoded in the given string.
        """
        return cls.parse(StringIO(data))
    parseString = classmethod(parseString)

# Module-level aliases: parse/parseString are the public entry points for
# synchronously decoding amp data.
parse = _ParserHelper.parse
parseString = _ParserHelper.parseString
def _stringsToObjects(strings, arglist, proto):
"""
Convert an AmpBox to a dictionary of python objects, converting through a
given arglist.
@param strings: an AmpBox (or dict of strings)
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param proto: an L{AMP} instance.
@return: the converted dictionary mapping names to argument objects.
"""
objects = {}
myStrings = strings.copy()
for argname, argparser in arglist:
argparser.fromBox(argname, myStrings, objects, proto)
return objects
def _objectsToStrings(objects, arglist, strings, proto):
"""
Convert a dictionary of python objects to an AmpBox, converting through a
given arglist.
@param objects: a dict mapping names to python objects
@param arglist: a list of 2-tuples of strings and Argument objects, as
described in L{Command.arguments}.
@param strings: [OUT PARAMETER] An object providing the L{dict}
interface which will be populated with serialized data.
@param proto: an L{AMP} instance.
@return: The converted dictionary mapping names to encoded argument
strings (identical to C{strings}).
"""
myObjects = objects.copy()
for argname, argparser in arglist:
argparser.toBox(argname, strings, myObjects, proto)
return strings
class _FixedOffsetTZInfo(datetime.tzinfo):
    """
    A timezone with a constant offset from UTC and no daylight saving time.

    @ivar name: a C{str} naming this timezone after its offset, e.g.
    C{'+05:30'}.

    @ivar offset: a C{datetime.timedelta} giving this timezone's offset from
    UTC.
    """
    def __init__(self, sign, hours, minutes):
        # The display name always shows the magnitudes with the given sign.
        self.name = '%s%02i:%02i' % (sign, hours, minutes)
        if sign == '+':
            direction = 1
        elif sign == '-':
            direction = -1
        else:
            raise ValueError('invalid sign for timezone %r' % (sign,))
        self.offset = datetime.timedelta(hours=direction * hours,
                                         minutes=direction * minutes)

    def utcoffset(self, dt):
        """
        Return this timezone's fixed offset from UTC.
        """
        return self.offset

    def dst(self, dt):
        """
        Return a zero C{datetime.timedelta}; this timezone never observes
        daylight saving time.
        """
        return datetime.timedelta(0)

    def tzname(self, dt):
        """
        Return this timezone's descriptive name (its signed offset).
        """
        return self.name

utc = _FixedOffsetTZInfo('+', 0, 0)
class Decimal(Argument):
    """
    Encodes C{decimal.Decimal} instances.

    There are several ways in which a decimal value might be encoded.

    Special values are encoded as special strings::

      - Positive infinity is encoded as C{"Infinity"}
      - Negative infinity is encoded as C{"-Infinity"}
      - Quiet not-a-number is encoded as either C{"NaN"} or C{"-NaN"}
      - Signalling not-a-number is encoded as either C{"sNaN"} or C{"-sNaN"}

    Normal values are encoded using the base ten string representation, using
    engineering notation to indicate magnitude without precision, and "normal"
    digits to indicate precision.  For example::

      - C{"1"} represents the value I{1} with precision to one place.
      - C{"-1"} represents the value I{-1} with precision to one place.
      - C{"1.0"} represents the value I{1} with precision to two places.
      - C{"10"} represents the value I{10} with precision to two places.
      - C{"1E+2"} represents the value I{10} with precision to one place.
      - C{"1E-1"} represents the value I{0.1} with precision to one place.
      - C{"1.5E+2"} represents the value I{15} with precision to two places.

    U{http://speleotrove.com/decimal/} should be considered the authoritative
    specification for the format.
    """
    # Parsing is exactly the decimal.Decimal constructor.
    fromString = decimal.Decimal

    def toString(self, inObject):
        """
        Serialize a C{decimal.Decimal} instance to the specified wire format.
        """
        if not isinstance(inObject, decimal.Decimal):
            raise ValueError(
                "amp.Decimal can only encode instances of decimal.Decimal")
        # decimal.Decimal.__str__ emits exactly the wire format described in
        # the class docstring.
        return str(inObject)
class DateTime(Argument):
    """
    Encodes C{datetime.datetime} instances.

    Wire format: '%04i-%02i-%02iT%02i:%02i:%02i.%06i%s%02i:%02i'.  Fields in
    order are: year, month, day, hour, minute, second, microsecond, timezone
    direction (+ or -), timezone hour, timezone minute.  Encoded string is
    always exactly 32 characters long.  This format is compatible with ISO
    8601, but that does not mean all ISO 8601 dates can be accepted.

    Also, note that the datetime module's notion of a "timezone" can be
    complex, but the wire format includes only a fixed offset, so the
    conversion is not lossless.  A lossless transmission of a C{datetime}
    instance is not feasible since the receiving end would require a Python
    interpreter.

    @ivar _positions: A sequence of slices giving the positions of various
    interesting parts of the wire format.
    """
    _positions = [
        slice(0, 4), slice(5, 7), slice(8, 10),       # year, month, day
        slice(11, 13), slice(14, 16), slice(17, 19),  # hour, minute, second
        slice(20, 26),                                # microsecond
        # intentionally skip timezone direction, as it is not an integer
        slice(27, 29), slice(30, 32)                  # timezone hour, timezone minute
        ]

    def fromString(self, s):
        """
        Parse a string containing a date and time in the wire format into a
        C{datetime.datetime} instance.
        """
        if len(s) != 32:
            raise ValueError('invalid date format %r' % (s,))

        # Nine integers: y, m, d, H, M, S, us, tz-hour, tz-minute.
        values = [int(s[p]) for p in self._positions]
        sign = s[26]
        timezone = _FixedOffsetTZInfo(sign, *values[7:])
        # Replace the two timezone integers with the tzinfo instance so the
        # list lines up with datetime's positional constructor arguments.
        values[7:] = [timezone]
        return datetime.datetime(*values)

    def toString(self, i):
        """
        Serialize a C{datetime.datetime} instance to a string in the specified
        wire format.
        """
        offset = i.utcoffset()
        if offset is None:
            raise ValueError(
                'amp.DateTime cannot serialize naive datetime instances.  '
                'You may find amp.utc useful.')

        minutesOffset = (offset.days * 86400 + offset.seconds) // 60

        if minutesOffset > 0:
            sign = '+'
        else:
            # NOTE(review): a zero offset (UTC) takes this branch and is
            # encoded as '-00:00'; it still round-trips to a zero offset.
            sign = '-'

        # strftime has no way to format the microseconds, or put a ':' in the
        # timezone. Surprise!

        return '%04i-%02i-%02iT%02i:%02i:%02i.%06i%s%02i:%02i' % (
            i.year,
            i.month,
            i.day,
            i.hour,
            i.minute,
            i.second,
            i.microsecond,
            sign,
            abs(minutesOffset) // 60,
            abs(minutesOffset) % 60)
| agpl-3.0 |
adamwwt/chvac | venv/lib/python2.7/site-packages/pygments/lexers/math.py | 71 | 76438 | # -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.util import shebang_matches
from pygments.lexer import Lexer, RegexLexer, bygroups, include, \
combined, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
from pygments.lexers import _scilab_builtins
from pygments.lexers import _stan_builtins
__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer',
'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer',
'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer',
'IDLLexer', 'RdLexer']
class JuliaLexer(RegexLexer):
    """
    For `Julia <http://julialang.org/>`_ source code.

    *New in Pygments 1.6.*
    """
    name = 'Julia'
    aliases = ['julia','jl']
    filenames = ['*.jl']
    mimetypes = ['text/x-julia','application/x-julia']

    # Identifiers highlighted as Name.Builtin; joined into one big
    # alternation in the 'root' state below.
    builtins = [
        'exit','whos','edit','load','is','isa','isequal','typeof','tuple',
        'ntuple','uid','hash','finalizer','convert','promote','subtype',
        'typemin','typemax','realmin','realmax','sizeof','eps','promote_type',
        'method_exists','applicable','invoke','dlopen','dlsym','system',
        'error','throw','assert','new','Inf','Nan','pi','im',
    ]

    # NOTE: rule order matters -- the first matching pattern wins.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[@]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),

            # keywords
            (r'(begin|while|for|in|return|break|continue|'
             r'macro|quote|let|if|elseif|else|try|catch|end|'
             r'bitstype|ccall|do|using|module|import|export|'
             r'importall|baremodule)\b', Keyword),
            (r'(local|global|const)\b', Keyword.Declaration),
            (r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64'
             r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b',
             Keyword.Type),

            # functions -- the name itself is consumed by 'funcname'
            (r'(function)((?:\s|\\\s)+)',
             bygroups(Keyword,Name.Function), 'funcname'),

            # types -- the name itself is consumed by 'typename'
            (r'(type|typealias|abstract)((?:\s|\\\s)+)',
             bygroups(Keyword,Name.Class), 'typename'),

            # operators
            (r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator),
            (r'\.\*|\.\^|\.\\|\.\/|\\', Operator),

            # builtins
            ('(' + '|'.join(builtins) + r')\b', Name.Builtin),

            # backticks (command literals)
            (r'`(?s).*?`', String.Backtick),

            # chars
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|"
             r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char),

            # try to match trailing transpose
            (r'(?<=[.\w\)\]])\'+', Operator),

            # strings: I/L-prefixed are raw-ish, E-prefixed (or bare)
            # additionally get escape-sequence highlighting
            (r'(?:[IL])"', String, 'string'),
            (r'[E]?"', String, combined('stringescape', 'string')),

            # names
            (r'@[a-zA-Z0-9_.]+', Name.Decorator),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),

            # numbers
            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'0b[01]+', Number.Binary),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+', Number.Integer)
        ],

        # Entered after the 'function' keyword; pops after one name
        # (either an identifier or a 1-2 char operator, possibly
        # parenthesized for operator overloading).
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'),
            ('\([^\s\w{]{1,2}\)', Operator, '#pop'),
            ('[^\s\w{]{1,2}', Operator, '#pop'),
        ],

        # Entered after 'type'/'typealias'/'abstract'.
        'typename': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],

        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],

        'string': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
            # $var / $(expr) style interpolation with optional
            # printf-like width/precision modifiers
            (r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?',
             String.Interpol),
            (r'[^\\"$]+', String),
            # quotes, dollar signs, and backslashes must be parsed one at a time
            (r'["\\]', String),
            # unhandled string formatting sign
            (r'\$', String)
        ],
    }

    def analyse_text(text):
        # Heuristic: claim files whose shebang line mentions julia.
        return shebang_matches(text, r'julia')
line_re = re.compile('.*?\n')
class JuliaConsoleLexer(Lexer):
    """
    For Julia console sessions. Modeled after MatlabSessionLexer.

    Lines starting with the ``julia>`` prompt have the prompt emitted as
    ``Generic.Prompt`` and the remainder lexed as Julia source; indented
    continuation/error lines become ``Generic.Traceback``; everything
    else is plain ``Generic.Output``.

    *New in Pygments 1.6.*
    """
    name = 'Julia console'
    aliases = ['jlcon']

    def get_tokens_unprocessed(self, text):
        jllexer = JuliaLexer(**self.options)
        curcode = ''     # buffered Julia source awaiting lexing
        insertions = []  # prompt/traceback tokens to weave back in
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('julia>'):
                # The prompt is 6 characters wide.  The previous code
                # sliced at 3 (copied from the 3-char Matlab '>> '
                # prompt), which lexed half of 'julia>' as Julia code.
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:6])]))
                curcode += line[6:]
            elif line.startswith(' '):
                idx = len(curcode)
                # Prepend a newline so the error/continuation text is
                # not rendered on the same line as the preceding code.
                line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append((idx, [token]))
            else:
                # Plain output: flush any buffered code first, weaving
                # the prompt tokens back into the lexed stream.
                if curcode:
                    for item in do_insertions(
                            insertions, jllexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        # Flush a trailing code block not followed by any output.
        if curcode:
            for item in do_insertions(
                    insertions, jllexer.get_tokens_unprocessed(curcode)):
                yield item
class MuPADLexer(RegexLexer):
    """
    A `MuPAD <http://www.mupad.com>`_ lexer.
    Contributed by Christopher Creutzig <christopher@creutzig.de>.

    *New in Pygments 0.8.*
    """
    name = 'MuPAD'
    aliases = ['mupad']
    filenames = ['*.mu']

    tokens = {
        'root' : [
            (r'//.*?$', Comment.Single),
            # '/*' enters the (nestable) multi-line comment state below.
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            (r'''(?x)\b(?:
                next|break|end|
                axiom|end_axiom|category|end_category|domain|end_domain|inherits|
                if|%if|then|elif|else|end_if|
                case|of|do|otherwise|end_case|
                while|end_while|
                repeat|until|end_repeat|
                for|from|to|downto|step|end_for|
                proc|local|option|save|begin|end_proc|
                delete|frame
              )\b''', Keyword),
            (r'''(?x)\b(?:
                DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
                DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
                DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
                DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
              )\b''', Name.Class),
            (r'''(?x)\b(?:
                PI|EULER|E|CATALAN|
                NIL|FAIL|undefined|infinity|
                TRUE|FALSE|UNKNOWN
              )\b''',
             Name.Constant),
            (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
            (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
            (r'''(?x)\b(?:
                and|or|not|xor|
                assuming|
                div|mod|
                union|minus|intersect|in|subset
              )\b''',
             Operator.Word),
            (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
            #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
            # Identifier (possibly backquoted and/or '::'-qualified)
            # immediately followed by '(' -- highlighted as a call.
            (r'''(?x)
              ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
              (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''',
             bygroups(Name.Function, Text, Punctuation)),
            # Same identifier shape without a call -- a plain variable.
            (r'''(?x)
              (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
              (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
            (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
            (r'\.[0-9]+(?:e[0-9]+)?', Number),
            (r'.', Text)
        ],
        # Nestable /* ... */ comments: '#push' on each nested opener keeps
        # the nesting depth on the state stack.
        'comment' : [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ]
    }
class MatlabLexer(RegexLexer):
    """
    For Matlab source code.

    *New in Pygments 0.10.*
    """
    name = 'Matlab'
    aliases = ['matlab']
    filenames = ['*.m']
    mimetypes = ['text/matlab']

    #
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # for f in elfun specfun elmat; do
    #     echo -n "$f = "
    #     matlab -nojvm -r "help $f;exit;" | perl -ne \
    #     'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
    # done
    #
    # elfun: Elementary math functions
    # specfun: Special Math functions
    # elmat: Elementary matrices and matrix manipulation
    #
    # taken from Matlab version 7.4.0.336 (R2007a)
    #
    elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
             "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
             "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
             "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
             "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
             "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
             "angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
             "fix","floor","ceil","round","mod","rem","sign"]
    specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
               "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
               "erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
               "cross","dot","factor","isprime","primes","gcd","lcm","rat",
               "rats","perms","nchoosek","factorial","cart2sph","cart2pol",
               "pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
    elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
             "freqspace","meshgrid","accumarray","size","length","ndims","numel",
             "disp","isempty","isequal","isequalwithequalnans","cat","reshape",
             "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
             "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
             "ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
             "ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
             "isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
             "hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
             "wilkinson"]

    # NOTE: rule order matters -- the first matching pattern wins.
    tokens = {
        'root': [
            # line starting with '!' is sent as a system command. not sure what
            # label to use...
            (r'^!.*', String.Other),
            (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
            (r'%.*$', Comment),
            (r'^\s*function', Keyword, 'deffunc'),

            # from 'iskeyword' on version 7.11 (R2010):
            (r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|'
             r'events|for|function|global|if|methods|otherwise|parfor|'
             r'persistent|properties|return|spmd|switch|try|while)\b', Keyword),

            ("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),

            # line continuation with following comment:
            (r'\.\.\..*$', Comment),

            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),

            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer),

            (r'(?<![\w\)\]])\'', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r'[^\']*\'', String, '#pop')
        ],
        # %{ ... %} block comments; '#pop' on the closing marker.
        'blockcomment': [
            (r'^\s*%\}', Comment.Multiline, '#pop'),
            (r'^.*\n', Comment.Multiline),
            (r'.', Comment.Multiline),
        ],
        # After 'function': optional '<outputs> =', then the function name,
        # argument list and trailing whitespace.
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }

    def analyse_text(text):
        # '%' comments and '!' shell escapes are strong Matlab signals;
        # otherwise return a weak baseline (Octave also claims '*.m').
        if re.match('^\s*%', text, re.M): # comment
            return 0.9
        elif re.match('^!\w+', text, re.M): # system cmd
            return 0.9
        return 0.1
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
    """
    For Matlab sessions.  Modeled after PythonConsoleLexer.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    *New in Pygments 0.10.*
    """
    name = 'Matlab session'
    aliases = ['matlabsession']

    def get_tokens_unprocessed(self, text):
        code_lexer = MatlabLexer(**self.options)
        pending_code = ''       # buffered Matlab source not yet lexed
        prompt_insertions = []  # prompt/traceback tokens to weave back in
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>>'):
                # The '>> ' prompt plus its trailing space is three
                # characters wide.
                prompt_insertions.append(
                    (len(pending_code), [(0, Generic.Prompt, line[:3])]))
                pending_code += line[3:]
            elif line.startswith('???'):
                # Error output: prepend a newline so the traceback is not
                # rendered on the same line as the preceding code.
                prompt_insertions.append(
                    (len(pending_code),
                     [(0, Generic.Traceback, "\n" + line)]))
            else:
                # Plain output -- flush buffered code first, merging the
                # prompt tokens into the lexed stream.
                if pending_code:
                    for token in do_insertions(
                            prompt_insertions,
                            code_lexer.get_tokens_unprocessed(pending_code)):
                        yield token
                    pending_code = ''
                    prompt_insertions = []
                yield match.start(), Generic.Output, line
        # A session may end on a code block with no trailing output.
        if pending_code:
            for token in do_insertions(
                    prompt_insertions,
                    code_lexer.get_tokens_unprocessed(pending_code)):
                yield token
class OctaveLexer(RegexLexer):
    """
    For GNU Octave source code.

    *New in Pygments 1.5.*
    """
    name = 'Octave'
    aliases = ['octave']
    filenames = ['*.m']
    mimetypes = ['text/octave']

    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # First dump all of the Octave manual into a plain text file:
    #
    # $ info octave --subnodes -o octave-manual
    #
    # Now grep through it:

    # for i in \
    #     "Built-in Function" "Command" "Function File" \
    #     "Loadable Function" "Mapping Function";
    # do
    #     perl -e '@name = qw('"$i"');
    #              print lc($name[0]),"_kw = [\n"';
    #
    #     perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
    #         octave-manual | sort | uniq ;
    #     echo "]" ;
    #     echo;
    # done

    # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)

    builtin_kw = [ "addlistener", "addpath", "addproperty", "all",
                   "and", "any", "argnames", "argv", "assignin",
                   "atexit", "autoload",
                   "available_graphics_toolkits", "beep_on_error",
                   "bitand", "bitmax", "bitor", "bitshift", "bitxor",
                   "cat", "cell", "cellstr", "char", "class", "clc",
                   "columns", "command_line_path",
                   "completion_append_char", "completion_matches",
                   "complex", "confirm_recursive_rmdir", "cputime",
                   "crash_dumps_octave_core", "ctranspose", "cumprod",
                   "cumsum", "debug_on_error", "debug_on_interrupt",
                   "debug_on_warning", "default_save_options",
                   "dellistener", "diag", "diff", "disp",
                   "doc_cache_file", "do_string_escapes", "double",
                   "drawnow", "e", "echo_executing_commands", "eps",
                   "eq", "errno", "errno_list", "error", "eval",
                   "evalin", "exec", "exist", "exit", "eye", "false",
                   "fclear", "fclose", "fcntl", "fdisp", "feof",
                   "ferror", "feval", "fflush", "fgetl", "fgets",
                   "fieldnames", "file_in_loadpath", "file_in_path",
                   "filemarker", "filesep", "find_dir_in_path",
                   "fixed_point_format", "fnmatch", "fopen", "fork",
                   "formula", "fprintf", "fputs", "fread", "freport",
                   "frewind", "fscanf", "fseek", "fskipl", "ftell",
                   "functions", "fwrite", "ge", "genpath", "get",
                   "getegid", "getenv", "geteuid", "getgid",
                   "getpgrp", "getpid", "getppid", "getuid", "glob",
                   "gt", "gui_mode", "history_control",
                   "history_file", "history_size",
                   "history_timestamp_format_string", "home",
                   "horzcat", "hypot", "ifelse",
                   "ignore_function_time_stamp", "inferiorto",
                   "info_file", "info_program", "inline", "input",
                   "intmax", "intmin", "ipermute",
                   "is_absolute_filename", "isargout", "isbool",
                   "iscell", "iscellstr", "ischar", "iscomplex",
                   "isempty", "isfield", "isfloat", "isglobal",
                   "ishandle", "isieee", "isindex", "isinteger",
                   "islogical", "ismatrix", "ismethod", "isnull",
                   "isnumeric", "isobject", "isreal",
                   "is_rooted_relative_filename", "issorted",
                   "isstruct", "isvarname", "kbhit", "keyboard",
                   "kill", "lasterr", "lasterror", "lastwarn",
                   "ldivide", "le", "length", "link", "linspace",
                   "logical", "lstat", "lt", "make_absolute_filename",
                   "makeinfo_program", "max_recursion_depth", "merge",
                   "methods", "mfilename", "minus", "mislocked",
                   "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
                   "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
                   "munlock", "nargin", "nargout",
                   "native_float_format", "ndims", "ne", "nfields",
                   "nnz", "norm", "not", "numel", "nzmax",
                   "octave_config_info", "octave_core_file_limit",
                   "octave_core_file_name",
                   "octave_core_file_options", "ones", "or",
                   "output_max_field_width", "output_precision",
                   "page_output_immediately", "page_screen_output",
                   "path", "pathsep", "pause", "pclose", "permute",
                   "pi", "pipe", "plus", "popen", "power",
                   "print_empty_dimensions", "printf",
                   "print_struct_array_contents", "prod",
                   "program_invocation_name", "program_name",
                   "putenv", "puts", "pwd", "quit", "rats", "rdivide",
                   "readdir", "readlink", "read_readline_init_file",
                   "realmax", "realmin", "rehash", "rename",
                   "repelems", "re_read_readline_init_file", "reset",
                   "reshape", "resize", "restoredefaultpath",
                   "rethrow", "rmdir", "rmfield", "rmpath", "rows",
                   "save_header_format_string", "save_precision",
                   "saving_history", "scanf", "set", "setenv",
                   "shell_cmd", "sighup_dumps_octave_core",
                   "sigterm_dumps_octave_core", "silent_functions",
                   "single", "size", "size_equal", "sizemax",
                   "sizeof", "sleep", "source", "sparse_auto_mutate",
                   "split_long_rows", "sprintf", "squeeze", "sscanf",
                   "stat", "stderr", "stdin", "stdout", "strcmp",
                   "strcmpi", "string_fill_char", "strncmp",
                   "strncmpi", "struct", "struct_levels_to_print",
                   "strvcat", "subsasgn", "subsref", "sum", "sumsq",
                   "superiorto", "suppress_verbose_help_message",
                   "symlink", "system", "tic", "tilde_expand",
                   "times", "tmpfile", "tmpnam", "toc", "toupper",
                   "transpose", "true", "typeinfo", "umask", "uminus",
                   "uname", "undo_string_escapes", "unlink", "uplus",
                   "upper", "usage", "usleep", "vec", "vectorize",
                   "vertcat", "waitpid", "warning", "warranty",
                   "whos_line_format", "yes_or_no", "zeros",
                   "inf", "Inf", "nan", "NaN"]

    command_kw = [ "close", "load", "who", "whos", ]

    function_kw = [ "accumarray", "accumdim", "acosd", "acotd",
                    "acscd", "addtodate", "allchild", "ancestor",
                    "anova", "arch_fit", "arch_rnd", "arch_test",
                    "area", "arma_rnd", "arrayfun", "ascii", "asctime",
                    "asecd", "asind", "assert", "atand",
                    "autoreg_matrix", "autumn", "axes", "axis", "bar",
                    "barh", "bartlett", "bartlett_test", "beep",
                    "betacdf", "betainv", "betapdf", "betarnd",
                    "bicgstab", "bicubic", "binary", "binocdf",
                    "binoinv", "binopdf", "binornd", "bitcmp",
                    "bitget", "bitset", "blackman", "blanks",
                    "blkdiag", "bone", "box", "brighten", "calendar",
                    "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
                    "cauchy_rnd", "caxis", "celldisp", "center", "cgs",
                    "chisquare_test_homogeneity",
                    "chisquare_test_independence", "circshift", "cla",
                    "clabel", "clf", "clock", "cloglog", "closereq",
                    "colon", "colorbar", "colormap", "colperm",
                    "comet", "common_size", "commutation_matrix",
                    "compan", "compare_versions", "compass",
                    "computer", "cond", "condest", "contour",
                    "contourc", "contourf", "contrast", "conv",
                    "convhull", "cool", "copper", "copyfile", "cor",
                    "corrcoef", "cor_test", "cosd", "cotd", "cov",
                    "cplxpair", "cross", "cscd", "cstrcat", "csvread",
                    "csvwrite", "ctime", "cumtrapz", "curl", "cut",
                    "cylinder", "date", "datenum", "datestr",
                    "datetick", "datevec", "dblquad", "deal",
                    "deblank", "deconv", "delaunay", "delaunayn",
                    "delete", "demo", "detrend", "diffpara", "diffuse",
                    "dir", "discrete_cdf", "discrete_inv",
                    "discrete_pdf", "discrete_rnd", "display",
                    "divergence", "dlmwrite", "dos", "dsearch",
                    "dsearchn", "duplication_matrix", "durbinlevinson",
                    "ellipsoid", "empirical_cdf", "empirical_inv",
                    "empirical_pdf", "empirical_rnd", "eomday",
                    "errorbar", "etime", "etreeplot", "example",
                    "expcdf", "expinv", "expm", "exppdf", "exprnd",
                    "ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
                    "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
                    "factorial", "fail", "fcdf", "feather", "fftconv",
                    "fftfilt", "fftshift", "figure", "fileattrib",
                    "fileparts", "fill", "findall", "findobj",
                    "findstr", "finv", "flag", "flipdim", "fliplr",
                    "flipud", "fpdf", "fplot", "fractdiff", "freqz",
                    "freqz_plot", "frnd", "fsolve",
                    "f_test_regression", "ftp", "fullfile", "fzero",
                    "gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
                    "gcbf", "gcbo", "gcf", "genvarname", "geocdf",
                    "geoinv", "geopdf", "geornd", "getfield", "ginput",
                    "glpk", "gls", "gplot", "gradient",
                    "graphics_toolkit", "gray", "grid", "griddata",
                    "griddatan", "gtext", "gunzip", "gzip", "hadamard",
                    "hamming", "hankel", "hanning", "hggroup",
                    "hidden", "hilb", "hist", "histc", "hold", "hot",
                    "hotelling_test", "housh", "hsv", "hurst",
                    "hygecdf", "hygeinv", "hygepdf", "hygernd",
                    "idivide", "ifftshift", "image", "imagesc",
                    "imfinfo", "imread", "imshow", "imwrite", "index",
                    "info", "inpolygon", "inputname", "interpft",
                    "interpn", "intersect", "invhilb", "iqr", "isa",
                    "isdefinite", "isdir", "is_duplicate_entry",
                    "isequal", "isequalwithequalnans", "isfigure",
                    "ishermitian", "ishghandle", "is_leap_year",
                    "isletter", "ismac", "ismember", "ispc", "isprime",
                    "isprop", "isscalar", "issquare", "isstrprop",
                    "issymmetric", "isunix", "is_valid_file_id",
                    "isvector", "jet", "kendall",
                    "kolmogorov_smirnov_cdf",
                    "kolmogorov_smirnov_test", "kruskal_wallis_test",
                    "krylov", "kurtosis", "laplace_cdf", "laplace_inv",
                    "laplace_pdf", "laplace_rnd", "legend", "legendre",
                    "license", "line", "linkprop", "list_primes",
                    "loadaudio", "loadobj", "logistic_cdf",
                    "logistic_inv", "logistic_pdf", "logistic_rnd",
                    "logit", "loglog", "loglogerr", "logm", "logncdf",
                    "logninv", "lognpdf", "lognrnd", "logspace",
                    "lookfor", "ls_command", "lsqnonneg", "magic",
                    "mahalanobis", "manova", "matlabroot",
                    "mcnemar_test", "mean", "meansq", "median", "menu",
                    "mesh", "meshc", "meshgrid", "meshz", "mexext",
                    "mget", "mkpp", "mode", "moment", "movefile",
                    "mpoles", "mput", "namelengthmax", "nargchk",
                    "nargoutchk", "nbincdf", "nbininv", "nbinpdf",
                    "nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
                    "nonzeros", "normcdf", "normest", "norminv",
                    "normpdf", "normrnd", "now", "nthroot", "null",
                    "ocean", "ols", "onenormest", "optimget",
                    "optimset", "orderfields", "orient", "orth",
                    "pack", "pareto", "parseparams", "pascal", "patch",
                    "pathdef", "pcg", "pchip", "pcolor", "pcr",
                    "peaks", "periodogram", "perl", "perms", "pie",
                    "pink", "planerot", "playaudio", "plot",
                    "plotmatrix", "plotyy", "poisscdf", "poissinv",
                    "poisspdf", "poissrnd", "polar", "poly",
                    "polyaffine", "polyarea", "polyderiv", "polyfit",
                    "polygcd", "polyint", "polyout", "polyreduce",
                    "polyval", "polyvalm", "postpad", "powerset",
                    "ppder", "ppint", "ppjumps", "ppplot", "ppval",
                    "pqpnonneg", "prepad", "primes", "print",
                    "print_usage", "prism", "probit", "qp", "qqplot",
                    "quadcc", "quadgk", "quadl", "quadv", "quiver",
                    "qzhess", "rainbow", "randi", "range", "rank",
                    "ranks", "rat", "reallog", "realpow", "realsqrt",
                    "record", "rectangle_lw", "rectangle_sw",
                    "rectint", "refresh", "refreshdata",
                    "regexptranslate", "repmat", "residue", "ribbon",
                    "rindex", "roots", "rose", "rosser", "rotdim",
                    "rref", "run", "run_count", "rundemos", "run_test",
                    "runtests", "saveas", "saveaudio", "saveobj",
                    "savepath", "scatter", "secd", "semilogx",
                    "semilogxerr", "semilogy", "semilogyerr",
                    "setaudio", "setdiff", "setfield", "setxor",
                    "shading", "shift", "shiftdim", "sign_test",
                    "sinc", "sind", "sinetone", "sinewave", "skewness",
                    "slice", "sombrero", "sortrows", "spaugment",
                    "spconvert", "spdiags", "spearman", "spectral_adf",
                    "spectral_xdf", "specular", "speed", "spencer",
                    "speye", "spfun", "sphere", "spinmap", "spline",
                    "spones", "sprand", "sprandn", "sprandsym",
                    "spring", "spstats", "spy", "sqp", "stairs",
                    "statistics", "std", "stdnormal_cdf",
                    "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
                    "stem", "stft", "strcat", "strchr", "strjust",
                    "strmatch", "strread", "strsplit", "strtok",
                    "strtrim", "strtrunc", "structfun", "studentize",
                    "subplot", "subsindex", "subspace", "substr",
                    "substruct", "summer", "surf", "surface", "surfc",
                    "surfl", "surfnorm", "svds", "swapbytes",
                    "sylvester_matrix", "symvar", "synthesis", "table",
                    "tand", "tar", "tcdf", "tempdir", "tempname",
                    "test", "text", "textread", "textscan", "tinv",
                    "title", "toeplitz", "tpdf", "trace", "trapz",
                    "treelayout", "treeplot", "triangle_lw",
                    "triangle_sw", "tril", "trimesh", "triplequad",
                    "triplot", "trisurf", "triu", "trnd", "tsearchn",
                    "t_test", "t_test_regression", "type", "unidcdf",
                    "unidinv", "unidpdf", "unidrnd", "unifcdf",
                    "unifinv", "unifpdf", "unifrnd", "union", "unique",
                    "unix", "unmkpp", "unpack", "untabify", "untar",
                    "unwrap", "unzip", "u_test", "validatestring",
                    "vander", "var", "var_test", "vech", "ver",
                    "version", "view", "voronoi", "voronoin",
                    "waitforbuttonpress", "wavread", "wavwrite",
                    "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
                    "welch_test", "what", "white", "whitebg",
                    "wienrnd", "wilcoxon_test", "wilkinson", "winter",
                    "xlabel", "xlim", "ylabel", "yulewalker", "zip",
                    "zlabel", "z_test", ]

    loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli",
                    "besselj", "besselk", "bessely", "bitpack",
                    "bsxfun", "builtin", "ccolamd", "cellfun",
                    "cellslices", "chol", "choldelete", "cholinsert",
                    "cholinv", "cholshift", "cholupdate", "colamd",
                    "colloc", "convhulln", "convn", "csymamd",
                    "cummax", "cummin", "daspk", "daspk_options",
                    "dasrt", "dasrt_options", "dassl", "dassl_options",
                    "dbclear", "dbdown", "dbstack", "dbstatus",
                    "dbstop", "dbtype", "dbup", "dbwhere", "det",
                    "dlmread", "dmperm", "dot", "eig", "eigs",
                    "endgrent", "endpwent", "etree", "fft", "fftn",
                    "fftw", "filter", "find", "full", "gcd",
                    "getgrent", "getgrgid", "getgrnam", "getpwent",
                    "getpwnam", "getpwuid", "getrusage", "givens",
                    "gmtime", "gnuplot_binary", "hess", "ifft",
                    "ifftn", "inv", "isdebugmode", "issparse", "kron",
                    "localtime", "lookup", "lsode", "lsode_options",
                    "lu", "luinc", "luupdate", "matrix_type", "max",
                    "min", "mktime", "pinv", "qr", "qrdelete",
                    "qrinsert", "qrshift", "qrupdate", "quad",
                    "quad_options", "qz", "rand", "rande", "randg",
                    "randn", "randp", "randperm", "rcond", "regexp",
                    "regexpi", "regexprep", "schur", "setgrent",
                    "setpwent", "sort", "spalloc", "sparse", "spparms",
                    "sprank", "sqrtm", "strfind", "strftime",
                    "strptime", "strrep", "svd", "svd_driver", "syl",
                    "symamd", "symbfact", "symrcm", "time", "tsearch",
                    "typecast", "urlread", "urlwrite", ]

    mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc",
                   "acsch", "angle", "arg", "asec", "asech", "asin",
                   "asinh", "atan", "atanh", "beta", "betainc",
                   "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
                   "cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
                   "erfcx", "erfinv", "exp", "finite", "fix", "floor",
                   "fmod", "gamma", "gammainc", "gammaln", "imag",
                   "isalnum", "isalpha", "isascii", "iscntrl",
                   "isdigit", "isfinite", "isgraph", "isinf",
                   "islower", "isna", "isnan", "isprint", "ispunct",
                   "isspace", "isupper", "isxdigit", "lcm", "lgamma",
                   "log", "lower", "mod", "real", "rem", "round",
                   "roundb", "sec", "sech", "sign", "sin", "sinh",
                   "sqrt", "tan", "tanh", "toascii", "tolower", "xor",
                   ]

    builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
                       "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
                       "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
                       "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
                       "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
                       "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
                       "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
                       "WSTOPSIG", "WTERMSIG", "WUNTRACED", ]

    # NOTE: rule order matters -- the first matching pattern wins.
    tokens = {
        'root': [
            # We should look into multiline comments
            (r'[%#].*$', Comment),
            (r'^\s*function', Keyword, 'deffunc'),

            # from 'iskeyword' on hg changeset 8cc154f45e37
            (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
             r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
             r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
             r'endswitch|endwhile|events|for|function|get|global|if|methods|'
             r'otherwise|persistent|properties|return|set|static|switch|try|'
             r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),

            ("(" + "|".join( builtin_kw + command_kw
                             + function_kw + loadable_kw
                             + mapping_kw) + r')\b', Name.Builtin),

            ("(" + "|".join(builtin_consts) + r')\b', Name.Constant),

            # operators in Octave but not Matlab:
            (r'-=|!=|!|/=|--', Operator),
            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators in Octave but not Matlab requiring escape for re:
            (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*',Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            (r'"[^"]*"', String),

            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),
            (r'(?<![\w\)\]])\'', String, 'string'),

            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
        ],
        # After 'function': optional '<outputs> =', then the function name,
        # argument list and trailing whitespace.
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }

    def analyse_text(text):
        # Weak guess only: '%'/'#' comments also appear in Matlab and many
        # other languages, so never claim the file strongly.
        if re.match('^\s*[%#]', text, re.M): # Comment
            return 0.1
class ScilabLexer(RegexLexer):
    """
    For Scilab source code.

    *New in Pygments 1.5.*
    """
    name = 'Scilab'
    aliases = ['scilab']
    filenames = ['*.sci', '*.sce', '*.tst']
    mimetypes = ['text/scilab']

    # NOTE: rule order matters -- the first matching pattern wins.
    tokens = {
        'root': [
            (r'//.*?$', Comment.Single),
            (r'^\s*function', Keyword, 'deffunc'),

            (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
             r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
             r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
             r'endswitch|endwhile|events|for|function|get|global|if|methods|'
             r'otherwise|persistent|properties|return|set|static|switch|try|'
             r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),

            # Builtin/command/macro names are maintained in the separate
            # _scilab_builtins module (generated lists).
            ("(" + "|".join(_scilab_builtins.functions_kw +
                            _scilab_builtins.commands_kw +
                            _scilab_builtins.macros_kw
                            ) + r')\b', Name.Builtin),

            (r'(%s)\b' % "|".join(map(re.escape, _scilab_builtins.builtin_consts)),
             Name.Constant),

            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'[\[\](){}@.,=:;]', Punctuation),

            (r'"[^"]*"', String),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),
            (r'(?<![\w\)\]])\'', String, 'string'),

            (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eEf][+-]?[0-9]+', Number.Float),
            (r'\d+', Number.Integer),

            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
            (r'.', String, '#pop'),
        ],
        # After 'function': optional '<outputs> =', then the function name,
        # argument list and trailing whitespace.
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
"""
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
    """Lex *text* as Python, then promote NumPy names to pseudo-keywords.

    Every token stream item produced by ``PythonLexer`` is passed through
    unchanged, except that a plain ``Name`` token whose text appears in
    ``EXTRA_KEYWORDS`` is re-tagged as ``Keyword.Pseudo`` so NumPy API
    names are highlighted distinctly.
    """
    base_stream = PythonLexer.get_tokens_unprocessed(self, text)
    for pos, tok_type, tok_text in base_stream:
        if tok_type is Name and tok_text in self.EXTRA_KEYWORDS:
            tok_type = Keyword.Pseudo
        yield pos, tok_type, tok_text
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.
    """

    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        """Split a transcript into prompted input (R code) and output.

        Lines beginning with '>' or '+' are console input: the two prompt
        characters are emitted as ``Generic.Prompt`` insertions and the
        remainder is buffered so consecutive input lines can be highlighted
        together as one R code block.  Any other line is raw R output.
        """
        code_lexer = SLexer(**self.options)
        pending_code = ''
        pending_prompts = []
        for line_match in line_re.finditer(text):
            raw_line = line_match.group()
            if raw_line.startswith(('>', '+')):
                # Prompt line: queue the prompt chars as an insertion and
                # accumulate the rest into the current code block.
                pending_prompts.append(
                    (len(pending_code), [(0, Generic.Prompt, raw_line[:2])]))
                pending_code += raw_line[2:]
                continue
            # Non-prompt line: flush any buffered input first, weaving the
            # prompt insertions back into the highlighted code.
            if pending_code:
                for tok in do_insertions(
                        pending_prompts,
                        code_lexer.get_tokens_unprocessed(pending_code)):
                    yield tok
                pending_code = ''
                pending_prompts = []
            yield line_match.start(), Generic.Output, raw_line
        # A transcript ending on input (no trailing output) still needs the
        # final buffered code block emitted.
        if pending_code:
            for tok in do_insertions(
                    pending_prompts,
                    code_lexer.get_tokens_unprocessed(pending_code)):
                yield tok
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    *New in Pygments 0.10.*
    """

    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile']
    mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
                 'text/x-R', 'text/x-r-history', 'text/x-r-profile']

    # NOTE: RegexLexer tries the rules of a state top to bottom and takes
    # the first match, so the order of rules within each list is significant.
    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (r'[a-zA-Z][0-9a-zA-Z\._]*', Text),
            # can begin with ., but not if that is followed by a digit
            (r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text),
        ],
        'punctuation': [
            (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
        ],
        'keywords': [
            (r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
             r'(?![0-9a-zA-Z\._])',
             Keyword.Reserved)
        ],
        'operators': [
            # Assignment arrows (<-, <<-, ->, ->>) and comparison operators.
            (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
            # Arithmetic, user-defined %..% operators, formula/slot access.
            (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
        ],
        'builtin_symbols': [
            (r'(NULL|NA(_(integer|real|complex|character)_)?|'
             r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))'
             r'(?![0-9a-zA-Z\._])',
             Keyword.Constant),
            (r'(T|F)\b', Keyword.Variable),
        ],
        'numbers': [
            # hex number
            (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
            # decimal number
            (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?',
             Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'`.*?`', String.Backtick),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
            include('valid_name'),
        ],
        'root': [
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            #(r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        #'block': [
        #    include('statements'),
        #    ('\{', Punctuation, '#push'),
        #    ('\}', Punctuation, '#pop')
        #],
        'string_squote': [
            (r'([^\'\\]|\\.)*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'([^"\\]|\\.)*"', String, '#pop'),
        ],
    }

    # Content-based guessing hook called by Pygments without an instance;
    # the R assignment arrow is treated as a (weak) signal of R source.
    def analyse_text(text):
        return '<-' in text
class BugsLexer(RegexLexer):
    """
    Pygments Lexer for `OpenBugs <http://www.openbugs.info/w/>`_ and WinBugs
    models.

    *New in Pygments 1.6.*
    """

    name = 'BUGS'
    aliases = ['bugs', 'winbugs', 'openbugs']
    filenames = ['*.bug']

    _FUNCTIONS = [
        # Scalar functions
        'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
        'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
        'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
        'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
        'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
        'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
        'trunc',
        # Vector functions
        'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
        'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
        'sd', 'sort', 'sum',
        ## Special
        'D', 'I', 'F', 'T', 'C']
    """ OpenBUGS built-in functions

    From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII

    This also includes

    - T, C, I : Truncation and censoring.
      ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
    - D : ODE
    - F : Functional http://www.openbugs.info/Examples/Functionals.html
    """

    _DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
                      'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
                      'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
                      'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
                      'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
                      'dmt', 'dwish']
    """ OpenBUGS built-in distributions

    Functions from
    http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
    """

    # Rule order within each state matters: first match wins.
    tokens = {
        'whitespace' : [
            (r"\s+", Text),
        ],
        'comments' : [
            # Comments
            (r'#.*$', Comment.Single),
        ],
        'root': [
            # Comments
            include('comments'),
            include('whitespace'),
            # Block start
            (r'(model)(\s+)({)',
             bygroups(Keyword.Namespace, Text, Punctuation)),
            # Reserved Words
            (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
            # Built-in Functions; the lookahead requires a following '(' so
            # these names still lex as plain variables when not called.
            (r'(%s)(?=\s*\()'
             % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
             Name.Builtin),
            # Regular variable names
            (r'[A-Za-z][A-Za-z0-9_.]*', Name),
            # Number Literals
            (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
            # Punctuation
            (r'\[|\]|\(|\)|:|,|;', Punctuation),
            # Assignment operators
            # SLexer makes these tokens Operators.
            (r'<-|~', Operator),
            # Infix and prefix operators
            (r'\+|-|\*|/', Operator),
            # Block
            (r'[{}]', Punctuation),
        ]
    }

    # Guessing hook (called without an instance): a BUGS model file is
    # expected to open a 'model {' block.
    def analyse_text(text):
        if re.search(r"^\s*model\s*{", text, re.M):
            return 0.7
        else:
            return 0.0
class JagsLexer(RegexLexer):
    """
    Pygments Lexer for JAGS.

    *New in Pygments 1.6.*
    """

    name = 'JAGS'
    aliases = ['jags']
    filenames = ['*.jag', '*.bug']

    ## JAGS
    _FUNCTIONS = [
        'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
        'cos', 'cosh', 'cloglog',
        'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
        'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
        'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
        'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
        'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
        # Truncation/Censoring (should I include)
        'T', 'I']
    # Distributions with density, probability and quartile functions
    _DISTRIBUTIONS = ['[dpq]%s' % x for x in
                      ['bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
                       'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
                       'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']]
    # Other distributions without density and probability
    _OTHER_DISTRIBUTIONS = [
        'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
        'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
        'dnbinom', 'dweibull', 'ddirich']

    # Rule order within each state matters: first match wins.
    tokens = {
        'whitespace' : [
            (r"\s+", Text),
        ],
        'names' : [
            # Regular variable names
            (r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name),
        ],
        'comments' : [
            # do not use stateful comments
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            # Comments
            (r'#.*$', Comment.Single),
        ],
        'root': [
            # Comments
            include('comments'),
            include('whitespace'),
            # Block start
            (r'(model|data)(\s+)({)',
             bygroups(Keyword.Namespace, Text, Punctuation)),
            (r'var(?![0-9a-zA-Z\._])', Keyword.Declaration),
            # Reserved Words
            (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
            # Builtins
            # Need to use lookahead because . is a valid char
            (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
                                          + _DISTRIBUTIONS
                                          + _OTHER_DISTRIBUTIONS),
             Name.Builtin),
            # Names
            include('names'),
            # Number Literals
            (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
            (r'\[|\]|\(|\)|:|,|;', Punctuation),
            # Assignment operators
            (r'<-|~', Operator),
            # # JAGS includes many more than OpenBUGS
            # BUG FIX: this pattern previously contained r'\|\|[&]{2}', which
            # only matches the literal four-character sequence "||&&".  '||'
            # and '&&' are separate operators and must be distinct
            # alternatives in the regex.
            (r'\+|-|\*|\/|\|\||&&|[<>=]=?|\^|%.*?%', Operator),
            (r'[{}]', Punctuation),
        ]
    }

    # Guessing hook (called without an instance).  '*.bug' is shared with
    # BugsLexer, so a JAGS-only 'data {' block or 'var' declaration scores
    # higher than a bare 'model {'.
    def analyse_text(text):
        if re.search(r'^\s*model\s*\{', text, re.M):
            if re.search(r'^\s*data\s*\{', text, re.M):
                return 0.9
            elif re.search(r'^\s*var', text, re.M):
                return 0.9
            else:
                return 0.3
        else:
            return 0
class StanLexer(RegexLexer):
    """
    Pygments Lexer for Stan models.

    *New in Pygments 1.6.*
    """

    name = 'Stan'
    aliases = ['stan']
    filenames = ['*.stan']

    # Reserved words and basic data types of the Stan modeling language.
    _RESERVED = ('for', 'in', 'while', 'repeat', 'until', 'if',
                 'then', 'else', 'true', 'false', 'T',
                 'lower', 'upper', 'print')
    _TYPES = ('int', 'real', 'vector', 'simplex', 'ordered', 'row_vector',
              'matrix', 'corr_matrix', 'cov_matrix', 'positive_ordered')

    # Rule order within each state matters: first match wins.
    tokens = {
        'whitespace' : [
            (r"\s+", Text),
        ],
        'comments' : [
            (r'(?s)/\*.*?\*/', Comment.Multiline),
            # Comments
            (r'(//|#).*$', Comment.Single),
        ],
        'root': [
            # Stan is more restrictive on strings than this regex
            (r'"[^"]*"', String),
            # Comments
            include('comments'),
            # block start
            include('whitespace'),
            # Block start
            (r'(%s)(\s*)({)' %
             r'|'.join(('data', r'transformed\s+?data',
                        'parameters', r'transformed\s+parameters',
                        'model', r'generated\s+quantities')),
             bygroups(Keyword.Namespace, Text, Punctuation)),
            # Reserved Words
            (r'(%s)\b' % r'|'.join(_RESERVED), Keyword.Reserved),
            # Data types
            (r'(%s)\b' % r'|'.join(_TYPES), Keyword.Type),
            # Punctuation
            (r"[;:,\[\]()<>]", Punctuation),
            # Builtin
            # _stan_builtins is a sibling module supplying the FUNCTIONS,
            # DISTRIBUTIONS and CONSTANTS name lists; the lookahead requires
            # a following '(' so these names still lex as variables when
            # not called.
            (r'(%s)(?=\s*\()'
             % r'|'.join(_stan_builtins.FUNCTIONS
                         + _stan_builtins.DISTRIBUTIONS),
             Name.Builtin),
            (r'(%s)(?=\s*\()'
             % r'|'.join(_stan_builtins.CONSTANTS), Keyword.Constant),
            # Special names ending in __, like lp__
            (r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo),
            # Regular variable names
            (r'[A-Za-z][A-Za-z0-9_]*\b', Name),
            # Real Literals
            (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
            (r'-?[0-9]*\.[0-9]*', Number.Float),
            # Integer Literals
            (r'-?[0-9]+', Number.Integer),
            # Assignment operators
            # SLexer makes these tokens Operators.
            (r'<-|~', Operator),
            # Infix and prefix operators (and = )
            (r"\+|-|\.?\*|\.?/|\\|'|=", Operator),
            # Block delimiters
            (r'[{}]', Punctuation),
        ]
    }

    # Guessing hook (called without an instance): only Stan model files
    # declare a 'parameters { ... }' block.
    def analyse_text(text):
        if re.search(r'^\s*parameters\s*\{', text, re.M):
            return 1.0
        else:
            return 0.0
class IDLLexer(RegexLexer):
    """
    Pygments Lexer for IDL (Interactive Data Language).

    *New in Pygments 1.6.*
    """

    name = 'IDL'
    aliases = ['idl']
    filenames = ['*.pro']
    mimetypes = ['text/idl']

    # BUG FIX: the list previously contained 'elseelse', a typo for the IDL
    # reserved word 'endelse' (the block terminators are endif/endelse/
    # endfor/endforeach/endrep/endwhile/endcase/endswitch).
    _RESERVED = ['and', 'begin', 'break', 'case', 'common', 'compile_opt',
                 'continue', 'do', 'else', 'end', 'endcase', 'endelse',
                 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
                 'endwhile', 'eq', 'for', 'foreach', 'forward_function',
                 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
                 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
                 'repeat', 'switch', 'then', 'until', 'while', 'xor']
    """Reserved words from: http://www.exelisvis.com/docs/reswords.html"""

    # NOTE: some entries (e.g. 'cdf_[0-9a-za-z_]*') are deliberately regex
    # fragments, since the whole list is joined into one alternation below.
    _BUILTIN_LIB = ['abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
                    'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
                    'arg_present', 'array_equal', 'array_indices', 'arrow',
                    'ascii_template', 'asin', 'assoc', 'atan', 'axis',
                    'a_correlate', 'bandpass_filter', 'bandreject_filter',
                    'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
                    'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
                    'binomial', 'bin_date', 'bit_ffs', 'bit_population',
                    'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
                    'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
                    'bytscl', 'caldat', 'calendar', 'call_external',
                    'call_function', 'call_method', 'call_procedure', 'canny',
                    'catch', 'cd', 'cdf_[0-9a-za-z_]*', 'ceil', 'chebyshev',
                    'check_math',
                    'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
                    'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
                    'cmyk_convert', 'colorbar', 'colorize_sample',
                    'colormap_applicable', 'colormap_gradient',
                    'colormap_rotation', 'colortable', 'color_convert',
                    'color_exchange', 'color_quan', 'color_range_map', 'comfit',
                    'command_line_args', 'complex', 'complexarr', 'complexround',
                    'compute_mesh_normals', 'cond', 'congrid', 'conj',
                    'constrained_min', 'contour', 'convert_coord', 'convol',
                    'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
                    'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
                    'create_view', 'crossp', 'crvlength', 'cti_test',
                    'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
                    'cw_animate', 'cw_animate_getp', 'cw_animate_load',
                    'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
                    'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
                    'cw_form', 'cw_fslider', 'cw_light_editor',
                    'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
                    'cw_palette_editor', 'cw_palette_editor_get',
                    'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
                    'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
                    'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
                    'define_msgblk', 'define_msgblk_from_file', 'defroi',
                    'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
                    'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
                    'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
                    'dialog_printersetup', 'dialog_printjob',
                    'dialog_read_image', 'dialog_write_image', 'digital_filter',
                    'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
                    'dlm_load', 'dlm_register', 'doc_library', 'double',
                    'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
                    'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
                    'eof', 'eos_[0-9a-za-z_]*', 'erase', 'erf', 'erfc', 'erfcx',
                    'erode', 'errorplot', 'errplot', 'estimator_filter',
                    'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
                    'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
                    'file_basename', 'file_chmod', 'file_copy', 'file_delete',
                    'file_dirname', 'file_expand_path', 'file_info',
                    'file_lines', 'file_link', 'file_mkdir', 'file_move',
                    'file_poll_input', 'file_readlink', 'file_same',
                    'file_search', 'file_test', 'file_which', 'findgen',
                    'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
                    'fltarr', 'flush', 'format_axis_values', 'free_lun',
                    'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
                    'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
                    'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
                    'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
                    'getwindows', 'get_drive_list', 'get_dxf_objects',
                    'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
                    'greg2jul', 'grib_[0-9a-za-z_]*', 'grid3', 'griddata',
                    'grid_input', 'grid_tps', 'gs_iter',
                    'h5[adfgirst]_[0-9a-za-z_]*', 'h5_browser', 'h5_close',
                    'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
                    'hanning', 'hash', 'hdf_[0-9a-za-z_]*', 'heap_free',
                    'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
                    'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
                    'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
                    'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
                    'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
                    'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
                    'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
                    'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
                    'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
                    'image_cont', 'image_statistics', 'imaginary', 'imap',
                    'indgen', 'intarr', 'interpol', 'interpolate',
                    'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
                    'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
                    'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
                    'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
                    'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
                    'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
                    'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
                    'json_serialize', 'jul2greg', 'julday', 'keyword_set',
                    'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
                    'label_region', 'ladfit', 'laguerre', 'laplacian',
                    'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
                    'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
                    'la_gm_linear_model', 'la_hqr', 'la_invert',
                    'la_least_squares', 'la_least_square_equality',
                    'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
                    'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
                    'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
                    'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
                    'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
                    'lngamma', 'lnp_test', 'loadct', 'locale_get',
                    'logical_and', 'logical_or', 'logical_true', 'lon64arr',
                    'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
                    'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
                    'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
                    'map_continents', 'map_grid', 'map_image', 'map_patch',
                    'map_proj_forward', 'map_proj_image', 'map_proj_info',
                    'map_proj_init', 'map_proj_inverse', 'map_set',
                    'matrix_multiply', 'matrix_power', 'max', 'md_test',
                    'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
                    'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
                    'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
                    'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
                    'message', 'min', 'min_curve_surf', 'mk_html_help',
                    'modifyct', 'moment', 'morph_close', 'morph_distance',
                    'morph_gradient', 'morph_hitormiss', 'morph_open',
                    'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
                    'ncdf_[0-9a-za-z_]*', 'newton', 'noise_hurl', 'noise_pick',
                    'noise_scatter', 'noise_slur', 'norm', 'n_elements',
                    'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
                    'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
                    'online_help', 'on_error', 'open', 'oplot', 'oploterr',
                    'parse_url', 'particle_trace', 'path_cache', 'path_sep',
                    'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
                    'plot_field', 'pnt_line', 'point_lun', 'polarplot',
                    'polar_contour', 'polar_surface', 'poly', 'polyfill',
                    'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
                    'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
                    'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
                    'print', 'printd', 'product', 'profile', 'profiler',
                    'profiles', 'project_vol', 'psafm', 'pseudo',
                    'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
                    'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
                    'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
                    'query_csv', 'query_dicom', 'query_gif', 'query_image',
                    'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
                    'query_png', 'query_ppm', 'query_srf', 'query_tiff',
                    'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
                    'rdpix', 'read', 'reads', 'readu', 'read_ascii',
                    'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
                    'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
                    'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
                    'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
                    'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
                    'read_xwd', 'real_part', 'rebin', 'recall_commands',
                    'recon3', 'reduce_colors', 'reform', 'region_grow',
                    'register_cursor', 'regress', 'replicate',
                    'replicate_inplace', 'resolve_all', 'resolve_routine',
                    'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
                    'rot', 'rotate', 'round', 'routine_filepath',
                    'routine_info', 'rs_test', 'r_correlate', 'r_test',
                    'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
                    'scope_traceback', 'scope_varfetch', 'scope_varname',
                    'search2d', 'search3d', 'sem_create', 'sem_delete',
                    'sem_lock', 'sem_release', 'setenv', 'set_plot',
                    'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
                    'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
                    'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
                    'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
                    'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
                    'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
                    'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
                    'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
                    'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
                    'streamline', 'stregex', 'stretch', 'string', 'strjoin',
                    'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
                    'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
                    'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
                    'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
                    'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
                    'tanh', 'tek_color', 'temporary', 'tetra_clip',
                    'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
                    'timegen', 'time_test2', 'tm_test', 'total', 'trace',
                    'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
                    'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
                    'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
                    'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
                    'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
                    'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
                    'value_locate', 'variance', 'vector', 'vector_field', 'vel',
                    'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
                    'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
                    'where', 'widget_base', 'widget_button', 'widget_combobox',
                    'widget_control', 'widget_displaycontextmen', 'widget_draw',
                    'widget_droplist', 'widget_event', 'widget_info',
                    'widget_label', 'widget_list', 'widget_propertysheet',
                    'widget_slider', 'widget_tab', 'widget_table',
                    'widget_text', 'widget_tree', 'widget_tree_move',
                    'widget_window', 'wiener_filter', 'window', 'writeu',
                    'write_bmp', 'write_csv', 'write_gif', 'write_image',
                    'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
                    'write_png', 'write_ppm', 'write_spr', 'write_srf',
                    'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
                    'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
                    'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
                    'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
                    'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
                    'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
                    'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
                    'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
                    'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
                    'xmtool', 'xobjview', 'xobjview_rotate',
                    'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
                    'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
                    'xvolume', 'xvolume_rotate', 'xvolume_write_image',
                    'xyouts', 'zoom', 'zoom_24']
    """Functions from: http://www.exelisvis.com/docs/routines-1.html"""

    tokens = {
        'root': [
            # Line comments: ';' to end of line.
            # BUG FIX: this previously used Comment.Singleline, which is not
            # a standard Pygments token type -- attribute access auto-creates
            # a new token that no style defines, so comments rendered
            # unstyled.  The correct standard type is Comment.Single.
            (r'^\s*;.*?\n', Comment.Single),
            (r'\b(' + '|'.join(_RESERVED) + r')\b', Keyword),
            (r'\b(' + '|'.join(_BUILTIN_LIB) + r')\b', Name.Builtin),
            # Compound assignment operators.
            (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
            # Arithmetic / matrix / pointer operators.
            (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator),
            # Word-form compound assignment (mod=, lt=, ...).
            (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
            # Word-form operators.
            (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
            # Numeric literals with optional IDL type suffix.
            (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number),
            (r'.', Text),
        ]
    }
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.

    *New in Pygments 1.6.*
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    # Rule order matters: escapes must be matched before comments/macros.
    tokens = {
        'root' : [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else
            (r'[^\\%\n{}]+', Text),
            (r'.', Text),
        ]
    }
| mit |
c0defreak/python-for-android | python3-alpha/python3-src/Lib/idlelib/HyperParser.py | 89 | 10310 | """
HyperParser
===========
This module defines the HyperParser class, which provides advanced parsing
abilities for the ParenMatch and other extensions.
The HyperParser uses PyParser. PyParser is intended mostly to give information
on the proper indentation of code. HyperParser gives some information on the
structure of code, used by extensions to help the user.
"""
import string
import keyword
from idlelib import PyParse
class HyperParser:
    """Provide structural information about the statement around a text index.

    Wraps PyParse to expose bracket nesting, string/comment status, and the
    Python expression ending at a given Tk text index.  Used by IDLE
    extensions such as ParenMatch and CallTips.
    """

    def __init__(self, editwin, index):
        """Initialize the HyperParser to analyze the surroundings of the given
        index.
        """
        self.editwin = editwin
        self.text = text = editwin.text

        parser = PyParse.Parser(editwin.indentwidth, editwin.tabwidth)

        # Convert a Tk "line.col" index string into its integer line number.
        def index2line(index):
            return int(float(index))
        lno = index2line(text.index(index))

        if not editwin.context_use_ps1:
            # Editor window: search backwards over progressively larger
            # contexts until a good statement start is found.
            for context in editwin.num_context_lines:
                startat = max(lno - context, 1)
                startatindex = repr(startat) + ".0"
                stopatindex = "%d.end" % lno
                # We add the newline because PyParse requires a newline at end.
                # We add a space so that index won't be at end of line, so that
                # its status will be the same as the char before it, if should.
                parser.set_str(text.get(startatindex, stopatindex)+' \n')
                bod = parser.find_good_parse_start(
                    editwin._build_char_in_string_func(startatindex))
                if bod is not None or startat == 1:
                    break
            parser.set_lo(bod or 0)
        else:
            # Shell window: start at the most recent console prompt, if any.
            r = text.tag_prevrange("console", index)
            if r:
                startatindex = r[1]
            else:
                startatindex = "1.0"
            stopatindex = "%d.end" % lno
            # We add the newline because PyParse requires a newline at end.
            # We add a space so that index won't be at end of line, so that
            # its status will be the same as the char before it, if should.
            parser.set_str(text.get(startatindex, stopatindex)+' \n')
            parser.set_lo(0)

        # We want what the parser has, except for the last newline and space.
        self.rawtext = parser.str[:-2]
        # As far as I can see, parser.str preserves the statement we are in,
        # so that stopatindex can be used to synchronize the string with the
        # text box indices.
        self.stopatindex = stopatindex
        self.bracketing = parser.get_last_stmt_bracketing()
        # find which pairs of bracketing are openers. These always correspond
        # to a character of rawtext.
        self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1]
                         for i in range(len(self.bracketing))]

        self.set_index(index)

    def set_index(self, index):
        """Set the index to which the functions relate. Note that it must be
        in the same statement.
        """
        indexinrawtext = \
            len(self.rawtext) - len(self.text.get(index, self.stopatindex))
        if indexinrawtext < 0:
            raise ValueError("The index given is before the analyzed statement")
        self.indexinrawtext = indexinrawtext
        # find the rightmost bracket to which index belongs
        self.indexbracket = 0
        while self.indexbracket < len(self.bracketing)-1 and \
              self.bracketing[self.indexbracket+1][0] < self.indexinrawtext:
            self.indexbracket += 1
        # If the index sits exactly on a closer, move past it so the index is
        # considered outside that bracket.
        if self.indexbracket < len(self.bracketing)-1 and \
           self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \
           not self.isopener[self.indexbracket+1]:
            self.indexbracket += 1

    def is_in_string(self):
        """Is the index given to the HyperParser is in a string?"""
        # The bracket to which we belong should be an opener.
        # If it's an opener, it has to have a character.
        return self.isopener[self.indexbracket] and \
               self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'")

    def is_in_code(self):
        """Is the index given to the HyperParser is in a normal code?"""
        # Not inside a comment ('#') or a string ('"', "'").
        return not self.isopener[self.indexbracket] or \
               self.rawtext[self.bracketing[self.indexbracket][0]] not in \
                                                                ('#', '"', "'")

    def get_surrounding_brackets(self, openers='([{', mustclose=False):
        """If the index given to the HyperParser is surrounded by a bracket
        defined in openers (or at least has one before it), return the
        indices of the opening bracket and the closing bracket (or the
        end of line, whichever comes first).

        If it is not surrounded by brackets, or the end of line comes before
        the closing bracket and mustclose is True, returns None.
        """
        bracketinglevel = self.bracketing[self.indexbracket][1]
        before = self.indexbracket
        # Walk left to the nearest opener from `openers` at or below our level.
        while not self.isopener[before] or \
              self.rawtext[self.bracketing[before][0]] not in openers or \
              self.bracketing[before][1] > bracketinglevel:
            before -= 1
            if before < 0:
                return None
            bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
        # Walk right until the nesting level drops below the opener's level.
        after = self.indexbracket + 1
        while after < len(self.bracketing) and \
              self.bracketing[after][1] >= bracketinglevel:
            after += 1

        beforeindex = self.text.index("%s-%dc" %
            (self.stopatindex, len(self.rawtext)-self.bracketing[before][0]))
        if after >= len(self.bracketing) or \
           self.bracketing[after][0] > len(self.rawtext):
            if mustclose:
                return None
            afterindex = self.stopatindex
        else:
            # We are after a real char, so it is a ')' and we give the index
            # before it.
            afterindex = self.text.index("%s-%dc" %
                (self.stopatindex,
                 len(self.rawtext)-(self.bracketing[after][0]-1)))

        return beforeindex, afterindex

    # This string includes all chars that may be in a white space
    _whitespace_chars = " \t\n\\"
    # This string includes all chars that may be in an identifier
    _id_chars = string.ascii_letters + string.digits + "_"
    # This string includes all chars that may be the first char of an identifier
    _id_first_chars = string.ascii_letters + "_"

    # Given a string and pos, return the number of chars in the identifier
    # which ends at pos, or 0 if there is no such one. Saved words are not
    # identifiers.
    def _eat_identifier(self, str, limit, pos):
        i = pos
        while i > limit and str[i-1] in self._id_chars:
            i -= 1
        # Reject if the leading char can't start an identifier, or if the
        # candidate is a Python keyword.
        if i < pos and (str[i] not in self._id_first_chars or \
                        keyword.iskeyword(str[i:pos])):
            i = pos
        return pos - i

    def get_expression(self):
        """Return a string with the Python expression which ends at the given
        index, which is empty if there is no real one.
        """
        if not self.is_in_code():
            raise ValueError("get_expression should only be called if index "\
                             "is inside a code.")

        rawtext = self.rawtext
        bracketing = self.bracketing

        brck_index = self.indexbracket
        brck_limit = bracketing[brck_index][0]
        pos = self.indexinrawtext

        last_identifier_pos = pos
        postdot_phase = True

        # Scan backwards, alternating between identifiers and dots, and
        # skipping over balanced closing brackets, until the expression can
        # no longer be extended leftwards.
        while 1:
            # Eat whitespaces, comments, and if postdot_phase is False - one dot
            while 1:
                if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars:
                    # Eat a whitespace
                    pos -= 1
                elif not postdot_phase and \
                     pos > brck_limit and rawtext[pos-1] == '.':
                    # Eat a dot
                    pos -= 1
                    postdot_phase = True
                # The next line will fail if we are *inside* a comment, but we
                # shouldn't be.
                elif pos == brck_limit and brck_index > 0 and \
                     rawtext[bracketing[brck_index-1][0]] == '#':
                    # Eat a comment
                    brck_index -= 2
                    brck_limit = bracketing[brck_index][0]
                    pos = bracketing[brck_index+1][0]
                else:
                    # If we didn't eat anything, quit.
                    break

            if not postdot_phase:
                # We didn't find a dot, so the expression end at the last
                # identifier pos.
                break

            ret = self._eat_identifier(rawtext, brck_limit, pos)
            if ret:
                # There is an identifier to eat
                pos = pos - ret
                last_identifier_pos = pos
                # Now, in order to continue the search, we must find a dot.
                postdot_phase = False
                # (the loop continues now)

            elif pos == brck_limit:
                # We are at a bracketing limit. If it is a closing bracket,
                # eat the bracket, otherwise, stop the search.
                level = bracketing[brck_index][1]
                while brck_index > 0 and bracketing[brck_index-1][1] > level:
                    brck_index -= 1
                if bracketing[brck_index][0] == brck_limit:
                    # We were not at the end of a closing bracket
                    break
                pos = bracketing[brck_index][0]
                brck_index -= 1
                brck_limit = bracketing[brck_index][0]
                last_identifier_pos = pos
                if rawtext[pos] in "([":
                    # [] and () may be used after an identifier, so we
                    # continue. postdot_phase is True, so we don't allow a dot.
                    pass
                else:
                    # We can't continue after other types of brackets
                    break

            else:
                # We've found an operator or something.
                break

        return rawtext[last_identifier_pos:self.indexinrawtext]
yogeshVU/matplotlib_apps | timeline_bar.py | 1 | 2447 | #this line prepares IPython for working with matplotlib
# NOTE: this file previously began with the IPython magic '%matplotlib inline',
# which is a SyntaxError when the file is run as a plain Python script.  When
# using this in a notebook, run '%matplotlib inline' in a cell first.
import matplotlib.pyplot as plt

# Draw a simple timeline: one horizontal bar track per simulation (S1-S3),
# with the same (start, duration) activity spans repeated on every track,
# plus reference lines marking event times and track centers.
fig, ax = plt.subplots()
ax.set_ylim(0, 50)
ax.set_xlim(0, 200)
ax.set_xlabel('seconds since start')
ax.set_ylabel('Simulation')
ax.set_yticks([5, 15, 25])
ax.set_yticklabels(['S1', 'S2', 'S3'])

# Activity spans as (start, duration) pairs; each track is a (y, height) band.
# Previously this was three copy-pasted loops (one per track) using Python 2
# 'print' statements; collapsed into a single nested loop with print().
bar_spans = [(100, 20), (150, 10)]
track_height = 5
for track_y in (5, 15, 25):
    for span in bar_spans:
        print(span, (track_y, track_height))
        ax.broken_barh([span], (track_y, track_height),
                       facecolors='red', hatch='xxx')

# Vertical marker at t=10.  (The old call passed hold=None, a keyword that
# was deprecated and then removed from matplotlib.)
plt.axvline(x=10, ymin=0, ymax=1)
ax.grid(True)

ax.annotate('race interrupted', (61, 25),
            xytext=(0.8, 0.9), textcoords='axes fraction',
            arrowprops=dict(facecolor='black', shrink=0.05),
            fontsize=16,
            horizontalalignment='right', verticalalignment='top')

# Red vertical lines at event times, blue horizontal lines at track bottoms.
for xc in [20, 50, 100]:
    plt.axvline(x=xc, color='r', linestyle='-')
for yc in [5, 15, 25]:
    plt.axhline(y=yc, color='b', linestyle='-')

plt.show()
| mit |
belmiromoreira/nova | nova/virt/vmwareapi/host.py | 60 | 3232 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host-related functions (start, reboot, etc).
"""
from oslo_utils import units
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from nova import exception
from nova import utils
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
def _get_ds_capacity_and_freespace(session, cluster=None,
                                   datastore_regex=None):
    """Return (capacity, freespace) of the matching datastore in bytes.

    Falls back to (0, 0) when no datastore matches the given cluster and
    regex, so callers never have to handle DatastoreNotFound themselves.
    """
    try:
        datastore = ds_util.get_datastore(session, cluster, datastore_regex)
    except exception.DatastoreNotFound:
        return 0, 0
    return datastore.capacity, datastore.freespace
class VCState(object):
    """Caches and reports status information about the VC host backing
    this compute node.
    """

    def __init__(self, session, host_name, cluster, datastore_regex):
        super(VCState, self).__init__()
        self._session = session
        self._host_name = host_name
        self._cluster = cluster
        self._datastore_regex = datastore_regex
        self._stats = {}
        # Populate the cache eagerly so the first reader sees real data.
        self.update_status()

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        When *refresh* is True, or nothing has been cached yet, the stats
        are recomputed first; otherwise the cached dict is returned.
        """
        if refresh or not self._stats:
            self.update_status()
        return self._stats

    def update_status(self):
        """Query the cluster and rebuild the cached stats dictionary."""
        capacity, freespace = _get_ds_capacity_and_freespace(
            self._session, self._cluster, self._datastore_regex)

        # CPU/memory figures come from the cluster; the hypervisor name and
        # version come from the VC "about" info.
        cluster_stats = vm_util.get_stats_from_cluster(self._session,
                                                       self._cluster)
        about_info = self._session._call_method(vim_util, "get_about_info")

        disk_total = capacity / units.Gi
        disk_available = freespace / units.Gi
        data = {
            "vcpus": cluster_stats['vcpus'],
            "disk_total": disk_total,
            "disk_available": disk_available,
            "disk_used": disk_total - disk_available,
            "host_memory_total": cluster_stats['mem']['total'],
            "host_memory_free": cluster_stats['mem']['free'],
            "hypervisor_type": about_info.name,
            "hypervisor_version": utils.convert_version_to_int(
                str(about_info.version)),
            "hypervisor_hostname": self._host_name,
            "supported_instances": [
                (arch.I686, hv_type.VMWARE, vm_mode.HVM),
                (arch.X86_64, hv_type.VMWARE, vm_mode.HVM)],
        }
        self._stats = data
        return data
| apache-2.0 |
ericlink/adms-server | playframework-dist/1.1-src/python/Lib/compiler/syntax.py | 25 | 1490 | """Check for errors in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
    """Walk *tree* with a SyntaxErrorChecker and return the error count.

    *multi* is forwarded to the checker: when not None, errors are printed
    instead of raising SyntaxError on the first one.
    """
    checker = SyntaxErrorChecker(multi)
    walk(tree, checker)
    return checker.errors
class SyntaxErrorChecker:
    """A visitor to find syntax errors in the AST."""

    def __init__(self, multi=None):
        """Create new visitor object.

        If optional argument multi is not None, then print messages
        for each error rather than raising a SyntaxError for the
        first.
        """
        # multi is not None => report-and-continue; None => fail fast.
        self.multi = multi
        # Running error count; exposed to callers via check().
        self.errors = 0

    def error(self, node, msg):
        # Record one error, then either print it (multi mode) or abort by
        # raising SyntaxError with the offending file/line (default mode).
        self.errors = self.errors + 1
        if self.multi is not None:
            # NOTE: Python 2 print statement — this module predates Python 3.
            print "%s:%s: %s" % (node.filename, node.lineno, msg)
        else:
            raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)

    def visitAssign(self, node):
        # the transformer module handles many of these
        pass
##        for target in node.nodes:
##            if isinstance(target, ast.AssList):
##                if target.lineno is None:
##                    target.lineno = node.lineno
##                self.error(target, "can't assign to list comprehension")
| mit |
wkschwartz/django | django/conf/locale/en_AU/formats.py | 35 | 1889 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats, written in Django's template date-filter syntax
# (not strftime).
DATE_FORMAT = 'j M Y'               # '25 Oct 2006'
TIME_FORMAT = 'P'                   # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P'        # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y'           # 'October 2006'
MONTH_DAY_FORMAT = 'j F'            # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y'         # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'   # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0               # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Parsing formats are tried in order; day comes first (d/m/Y), as is usual
# in Australia.  The commented-out alternatives are kept for reference.
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    # '%b %d %Y', '%b %d, %Y',          # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',          # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',          # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',          # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',        # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',     # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',           # '2006-10-25 14:30'
    '%d/%m/%Y %H:%M:%S',        # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',     # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',           # '25/10/2006 14:30'
    '%d/%m/%y %H:%M:%S',        # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',     # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',           # '25/10/06 14:30'
]
# Number formatting: full stop for decimals, comma-separated thousands
# in groups of three.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.