repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
stewnorriss/LibCloud | libcloud/test/compute/test_ssh_client.py | 17 | 11868 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more§
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import with_statement
import os
import sys
import tempfile
from libcloud import _init_once
from libcloud.test import LibcloudTestCase
from libcloud.test import unittest
from libcloud.compute.ssh import ParamikoSSHClient
from libcloud.compute.ssh import ShellOutSSHClient
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.py3 import StringIO
from mock import patch, Mock
# Paramiko is an optional dependency: when it is missing, null out the client
# class so the test class below can detect it and be replaced by a no-op stub.
if not have_paramiko:
    ParamikoSSHClient = None  # NOQA
else:
    import paramiko
class ParamikoSSHClientTests(LibcloudTestCase):
    """Unit tests for ParamikoSSHClient.

    ``paramiko.SSHClient`` is replaced by a ``Mock`` so no network connection
    is ever made; assertions inspect the mock's recorded calls and the debug
    log file written via ``LIBCLOUD_DEBUG``.
    """

    @patch('paramiko.SSHClient', Mock)
    def setUp(self):
        """
        Creates the object patching the actual connection.
        """
        conn_params = {'hostname': 'dummy.host.org',
                       'port': 8822,
                       'username': 'ubuntu',
                       'key': '~/.ssh/ubuntu_ssh',
                       'timeout': '600'}
        # Route libcloud debug logging to a temp file so assertLogMsg can
        # grep it after each operation.
        _, self.tmp_file = tempfile.mkstemp()
        os.environ['LIBCLOUD_DEBUG'] = self.tmp_file
        _init_once()
        self.ssh_cli = ParamikoSSHClient(**conn_params)

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_password(self):
        # Password-only auth must disable agent and key lookup.
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'password': 'ubuntu'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        expected_conn = {'username': 'ubuntu',
                         'password': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)
        self.assertLogMsg('Connecting to server')

    @patch('paramiko.SSHClient', Mock)
    def test_deprecated_key_argument(self):
        # The legacy 'key' kwarg must behave like 'key_files'.
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)
        self.assertLogMsg('Connecting to server')

    def test_key_files_and_key_material_arguments_are_mutual_exclusive(self):
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_files': 'id_rsa',
                       'key_material': 'key'}
        expected_msg = ('key_files and key_material arguments are mutually '
                       'exclusive')
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex.
        self.assertRaisesRegexp(ValueError, expected_msg,
                                ParamikoSSHClient, **conn_params)

    @patch('paramiko.SSHClient', Mock)
    def test_key_material_argument(self):
        # An in-memory private key must be turned into a paramiko pkey object.
        path = os.path.join(os.path.dirname(__file__),
                            'fixtures', 'misc', 'dummy_rsa')
        with open(path, 'r') as fp:
            private_key = fp.read()
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_material': private_key}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        pkey = paramiko.RSAKey.from_private_key(StringIO(private_key))
        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'pkey': pkey,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)
        self.assertLogMsg('Connecting to server')

    @patch('paramiko.SSHClient', Mock)
    def test_key_material_argument_invalid_key(self):
        # A string that is not a valid key must raise at connect() time.
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_material': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        expected_msg = 'Invalid or unsupported key type'
        self.assertRaisesRegexp(paramiko.ssh_exception.SSHException,
                                expected_msg, mock.connect)

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_key(self):
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'key_files': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        expected_conn = {'username': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)
        self.assertLogMsg('Connecting to server')

    @patch('paramiko.SSHClient', Mock)
    def test_create_with_password_and_key(self):
        # Both credentials supplied: both must be forwarded to paramiko.
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu',
                       'password': 'ubuntu',
                       'key': 'id_rsa'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        expected_conn = {'username': 'ubuntu',
                         'password': 'ubuntu',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'key_filename': 'id_rsa',
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)
        self.assertLogMsg('Connecting to server')

    @patch('paramiko.SSHClient', Mock)
    def test_create_without_credentials(self):
        """
        Initialize object with no credentials.
        Just to have better coverage, initialize the object
        without 'password' neither 'key'.
        """
        # With no explicit credentials the client falls back to the SSH
        # agent and on-disk keys (allow_agent / look_for_keys become True).
        conn_params = {'hostname': 'dummy.host.org',
                       'username': 'ubuntu'}
        mock = ParamikoSSHClient(**conn_params)
        mock.connect()
        expected_conn = {'username': 'ubuntu',
                         'hostname': 'dummy.host.org',
                         'allow_agent': True,
                         'look_for_keys': True,
                         'port': 22}
        mock.client.connect.assert_called_once_with(**expected_conn)

    def test_basic_usage_absolute_path(self):
        """
        Basic execution.
        """
        mock = self.ssh_cli
        # script to execute
        sd = "/root/random_script.sh"
        # Connect behavior
        mock.connect()
        mock_cli = mock.client  # The actual mocked object: SSHClient
        expected_conn = {'username': 'ubuntu',
                         'key_filename': '~/.ssh/ubuntu_ssh',
                         'allow_agent': False,
                         'hostname': 'dummy.host.org',
                         'look_for_keys': False,
                         'timeout': '600',
                         'port': 8822}
        mock_cli.connect.assert_called_once_with(**expected_conn)
        mock.put(sd)
        # Make assertions over 'put' method
        mock_cli.open_sftp().chdir.assert_called_with('root')
        mock_cli.open_sftp().file.assert_called_once_with('random_script.sh',
                                                          mode='w')
        mock.run(sd)
        # Make assertions over 'run' method
        mock_cli.get_transport().open_session().exec_command \
                .assert_called_once_with(sd)
        self.assertLogMsg('Executing command (cmd=/root/random_script.sh)')
        self.assertLogMsg('Command finished')
        mock.close()

    def test_delete_script(self):
        """
        Provide a basic test with 'delete' action.
        """
        mock = self.ssh_cli
        # script to execute
        sd = '/root/random_script.sh'
        mock.connect()
        mock.delete(sd)
        # Make assertions over the 'delete' method
        mock.client.open_sftp().unlink.assert_called_with(sd)
        self.assertLogMsg('Deleting file')
        mock.close()
        self.assertLogMsg('Closing server connection')

    def assertLogMsg(self, expected_msg):
        # Assert that the LIBCLOUD_DEBUG log file contains expected_msg.
        with open(self.tmp_file, 'r') as fp:
            content = fp.read()
        self.assertTrue(content.find(expected_msg) != -1)
# When paramiko is unavailable, replace the test class with an empty stub so
# the test runner still imports this module cleanly.
if not ParamikoSSHClient:
    class ParamikoSSHClientTests(LibcloudTestCase):  # NOQA
        pass
class ShellOutSSHClientTests(LibcloudTestCase):
    """Unit tests for ShellOutSSHClient (shells out to the ``ssh`` binary)."""

    def test_password_auth_not_supported(self):
        # The shell-out client cannot feed a password to ssh; it must refuse.
        try:
            ShellOutSSHClient(hostname='localhost', username='foo',
                              password='bar')
        except ValueError:
            e = sys.exc_info()[1]
            msg = str(e)
            self.assertTrue('ShellOutSSHClient only supports key auth' in msg)
        else:
            self.fail('Exception was not thrown')

    def test_ssh_executable_not_available(self):
        # Simulate `ssh` missing by returning exit code 127 from Popen.
        class MockChild(object):
            returncode = 127

            def communicate(*args, **kwargs):
                pass

        def mock_popen(*args, **kwargs):
            return MockChild()

        with patch('subprocess.Popen', mock_popen):
            try:
                ShellOutSSHClient(hostname='localhost', username='foo')
            except ValueError:
                e = sys.exc_info()[1]
                msg = str(e)
                self.assertTrue('ssh client is not available' in msg)
            else:
                self.fail('Exception was not thrown')

    def test_connect_success(self):
        client = ShellOutSSHClient(hostname='localhost', username='root')
        self.assertTrue(client.connect())

    def test_close_success(self):
        client = ShellOutSSHClient(hostname='localhost', username='root')
        self.assertTrue(client.close())

    def test_get_base_ssh_command(self):
        # Key and timeout options must map to `-i` and `-oConnectTimeout=N`.
        client1 = ShellOutSSHClient(hostname='localhost', username='root')
        client2 = ShellOutSSHClient(hostname='localhost', username='root',
                                    key='/home/my.key')
        client3 = ShellOutSSHClient(hostname='localhost', username='root',
                                    key='/home/my.key', timeout=5)
        cmd1 = client1._get_base_ssh_command()
        cmd2 = client2._get_base_ssh_command()
        cmd3 = client3._get_base_ssh_command()
        self.assertEqual(cmd1, ['ssh', 'root@localhost'])
        self.assertEqual(cmd2, ['ssh', '-i', '/home/my.key',
                                'root@localhost'])
        self.assertEqual(cmd3, ['ssh', '-i', '/home/my.key',
                                '-oConnectTimeout=5', 'root@localhost'])
# Allow running this test module directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
bougui505/SOM | PCA.py | 1 | 1348 | #!/usr/bin/env python
import numpy
#import pylab
def princomp(A, numpc=4, reconstruct=False, getEigenValues=True):
    """Principal component analysis of ``A``.

    ``A`` is laid out with one variable per row and one observation per
    column (the covariance is taken over ``numpy.cov(M)`` after centering
    each row on its own mean).

    :param A: 2-D array of shape (n_variables, n_observations).
    :param numpc: number of principal components to keep; values outside
        ``[0, n_variables)`` keep all components.
    :param reconstruct: when True, additionally return the data
        reconstructed from the kept components (transposed, matching the
        original return convention).
    :param getEigenValues: when True (and ``reconstruct`` is False), also
        return the sorted eigenvalues.
    :return: ``(eigenVectors, eigenValues, Ar)`` if ``reconstruct``,
        ``(eigenVectors, eigenValues)`` if ``getEigenValues``, else just
        ``eigenVectors``. Eigenvectors are columns, sorted by decreasing
        eigenvalue; real parts only.
    """
    # Center each variable (row) on its mean across observations.
    row_means = numpy.atleast_2d(numpy.mean(A, axis=1)).T
    M = A - row_means
    eigenValues, eigenVectors = numpy.linalg.eig(numpy.cov(M))
    p = numpy.size(eigenVectors, axis=1)
    # Sort eigenpairs by decreasing eigenvalue.
    idx = numpy.argsort(eigenValues)[::-1]
    eigenVectors = eigenVectors[:, idx]
    eigenValues = eigenValues[idx]
    # BUG FIX: the original condition was ``numpc < p or numpc >= 0`` which
    # is true for any non-negative numpc, so e.g. the default numpc=4 on
    # 2-variable data indexed past the last column and raised IndexError.
    # Only truncate when strictly fewer than all components are requested.
    if 0 <= numpc < p:
        eigenVectors = eigenVectors[:, :numpc]
    if reconstruct:
        # Projection of the centered data onto the kept components.
        score = numpy.dot(eigenVectors.T, M)
        # BUG FIX: the original re-added ``numpy.mean(A, axis=0)`` (column
        # means) although the centering above removed the row means; re-add
        # the same row means so the reconstruction actually approximates A.
        Ar = (numpy.dot(eigenVectors, score) + row_means).T
        return eigenVectors.real, eigenValues.real, Ar
    if getEigenValues:
        return eigenVectors.real, eigenValues.real
    return eigenVectors.real
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e368.py | 2 | 5841 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
# Experiment name derived from this script's filename (e.g. 'e368').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
# Output directory for experiment figures.
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500   # iterations between saved plots
GRADIENT_STEPS = 100       # truncated-BPTT length for recurrent layers
# Configuration for RealApplianceSource: which UK-DALE appliances to
# disaggregate, activation thresholds/durations, and batching/normalisation
# options. Commented-out entries are alternative settings kept for reference.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ],
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=1024,
    # random_window=64,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    skip_probability=0.9,
    one_target_per_seq=False,
    n_seq_per_batch=64,
    subsample_target=4,
    include_diff=False,
    include_power=True,
    # clip_appliance_power=True,
    target_is_prediction=False,
    # independently_center_inputs = True,
    standardise_input=True,
    unit_variance_targets=True,
    input_padding=2,
    lag=0
    # classification=True
    # reshape_target_to_2D=True
    # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
    #              'std': np.array([ 0.12636775], dtype=np.float32)},
    # target_stats={
    #     'mean': np.array([ 0.04066789,  0.01881946,
    #                        0.24639061,  0.17608672,  0.10273963],
    #                      dtype=np.float32),
    #     'std': np.array([ 0.11449792,  0.07338708,
    #                       0.26608968,  0.33463112,  0.21250485],
    #                     dtype=np.float32)}
)
N = 50
# Base network/training configuration shared by all experiments in this file;
# exp_a() deep-copies and extends it with a concrete layer stack.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    updates_func=momentum,
    learning_rate=1e-1,
    # Step-wise learning-rate schedule keyed by iteration number.
    learning_rate_changes_by_iteration={
        100: 1e-2,
        # 400: 1e-3,
        # 800: 1e-4
        # 500: 1e-3
        # 4000: 1e-03,
        # 6000: 5e-06,
        # 7000: 1e-06
        # 2000: 5e-06
        # 3000: 1e-05
        # 7000: 5e-06,
        # 10000: 1e-06,
        # 15000: 5e-07,
        # 50000: 1e-07
    },
    do_save_activations=True
    # auto_reshape=False,
    # plotter=CentralOutputPlotter
    # plotter=MDNPlotter
)
def exp_a(name):
    """Build experiment 'a': BLSTM -> Conv1D (stride 4) -> BLSTM -> Dense.

    Side effect: rebinds the module-level ``source`` so main() and plotting
    code can reach the data source after the net is built.

    :param name: experiment name used for logging/output files.
    :return: a configured (untrained) Net instance.
    """
    global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    # The Dimshuffle layers move the channel axis so Conv1DLayer can
    # convolve over time, then restore (batch, time, features) order.
    net_dict_copy['layers_config'] = [
        {
            'type': BLSTMLayer,
            'num_units': 40,
            'gradient_steps': GRADIENT_STEPS,
            'peepholes': False
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': Conv1DLayer,
            'num_filters': 20,
            'filter_length': 4,
            'stride': 4,   # downsamples time by 4x, matching subsample_target
            'nonlinearity': rectify
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)
        },
        {
            'type': BLSTMLayer,
            'num_units': 80,
            'gradient_steps': GRADIENT_STEPS,
            'peepholes': False
        },
        {
            'type': DenseLayer,
            'num_units': source.n_outputs,
            # softplus keeps predicted power non-negative
            'nonlinearity': T.nnet.softplus
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each configured experiment until finished or interrupted."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment returns a string such as "exp_a('e368a')" which is
        # eval'd below to dispatch to the matching exp_* builder.
        # NOTE(review): eval on a constructed string is fragile; a dict of
        # callables would be safer — confirm init_experiment's contract first.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=100000)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # Log and continue with the next experiment.
            logger.exception("Exception")
            # raise
        finally:
            logging.shutdown()
if __name__ == "__main__":
main()
| mit |
levkar/odoo | addons/hw_proxy/controllers/main.py | 9 | 7344 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import commands
import logging
import time
from threading import Lock
from odoo import http
from odoo.http import request
_logger = logging.getLogger(__name__)
# Those are the builtin raspberry pi USB modules, they should
# not appear in the list of connected devices.
BANNED_DEVICES = set([
    "0424:9514",  # Standard Microsystem Corp. Builtin Ethernet module
    "1d6b:0002",  # Linux Foundation 2.0 root hub
    "0424:ec00",  # Standard Microsystem Corp. Other Builtin Ethernet module
])

# drivers modules must add to drivers an object with a get_status() method
# so that 'status' can return the status of all active drivers
drivers = {}

# keep a list of RS-232 devices that have been recognized by a driver,
# so other drivers can skip them during probes
rs232_devices = {}  # {'/path/to/device': 'driver'}
rs232_lock = Lock()  # must be held to update `rs232_devices`
class Proxy(http.Controller):
    """HTTP endpoints exposed by the PosBox hardware proxy.

    NOTE: this is Python 2 code (print statements, the `commands` module).
    Most endpoints are stubs that only log; real drivers override behavior
    by registering objects in the module-level `drivers` dict.
    """

    def get_status(self):
        # Collect {driver_name: status_dict} from every registered driver.
        statuses = {}
        for driver in drivers:
            statuses[driver] = drivers[driver].get_status()
        return statuses

    @http.route('/hw_proxy/hello', type='http', auth='none', cors='*')
    def hello(self):
        # Liveness probe used by the PoS to detect the proxy.
        return "ping"

    @http.route('/hw_proxy/handshake', type='json', auth='none', cors='*')
    def handshake(self):
        return True

    @http.route('/hw_proxy/status', type='http', auth='none', cors='*')
    def status_http(self, debug=None, **kwargs):
        # Human-readable status page: driver states plus connected USB
        # devices (filtered through BANNED_DEVICES).
        resp = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox</title>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.device {
border-bottom: solid 1px rgb(216,216,216);
padding: 9px;
}
.device:nth-child(2n) {
background:rgb(240,240,240);
}
</style>
</head>
<body>
<h1>Hardware Status</h1>
<p>The list of enabled drivers and their status</p>
"""
        statuses = self.get_status()
        for driver in statuses:
            status = statuses[driver]
            if status['status'] == 'connecting':
                color = 'black'
            elif status['status'] == 'connected':
                color = 'green'
            else:
                color = 'red'
            resp += "<h3 style='color:"+color+";'>"+driver+' : '+status['status']+"</h3>\n"
            resp += "<ul>\n"
            for msg in status['messages']:
                resp += '<li>'+msg+'</li>\n'
            resp += "</ul>\n"
        resp += """
<h2>Connected Devices</h2>
<p>The list of connected USB devices as seen by the posbox</p>
"""
        if debug is None:
            resp += """(<a href="/hw_proxy/status?debug">debug version</a>)"""
        devices = commands.getoutput("lsusb").split('\n')
        count = 0
        resp += "<div class='devices'>\n"
        for device in devices:
            device_name = device[device.find('ID')+2:]
            device_id = device_name.split()[0]
            if not (device_id in BANNED_DEVICES):
                resp+= "<div class='device' data-device='"+device+"'>"+device_name+"</div>\n"
                count += 1
        if count == 0:
            resp += "<div class='device'>No USB Device Found</div>"
        resp += "</div>\n</body>\n</html>\n\n"
        if debug is not None:
            # NOTE(review): `subprocess` is never imported in this module
            # (only `commands` is), so this debug branch raises NameError
            # at runtime — confirm and add the import or switch to
            # commands.getoutput like above.
            resp += """
<h3>Debug version</h3>
<p><tt>lsusb -v</tt> output:</p>
<pre>
%s
</pre>
""" % subprocess.check_output('lsusb -v', shell=True)
        return request.make_response(resp,{
            'Cache-Control': 'no-cache',
            'Content-Type': 'text/html; charset=utf-8',
            'Access-Control-Allow-Origin':  '*',
            'Access-Control-Allow-Methods': 'GET',
        })

    @http.route('/hw_proxy/status_json', type='json', auth='none', cors='*')
    def status_json(self):
        return self.get_status()

    @http.route('/hw_proxy/scan_item_success', type='json', auth='none', cors='*')
    def scan_item_success(self, ean):
        """
        A product has been scanned with success
        """
        print 'scan_item_success: ' + str(ean)

    @http.route('/hw_proxy/scan_item_error_unrecognized', type='json', auth='none', cors='*')
    def scan_item_error_unrecognized(self, ean):
        """
        A product has been scanned without success
        """
        print 'scan_item_error_unrecognized: ' + str(ean)

    @http.route('/hw_proxy/help_needed', type='json', auth='none', cors='*')
    def help_needed(self):
        """
        The user wants an help (ex: light is on)
        """
        print "help_needed"

    @http.route('/hw_proxy/help_canceled', type='json', auth='none', cors='*')
    def help_canceled(self):
        """
        The user stops the help request
        """
        print "help_canceled"

    @http.route('/hw_proxy/payment_request', type='json', auth='none', cors='*')
    def payment_request(self, price):
        """
        The PoS will activate the method payment
        """
        print "payment_request: price:"+str(price)
        return 'ok'

    @http.route('/hw_proxy/payment_status', type='json', auth='none', cors='*')
    def payment_status(self):
        print "payment_status"
        return { 'status':'waiting' }

    @http.route('/hw_proxy/payment_cancel', type='json', auth='none', cors='*')
    def payment_cancel(self):
        print "payment_cancel"

    @http.route('/hw_proxy/transaction_start', type='json', auth='none', cors='*')
    def transaction_start(self):
        print 'transaction_start'

    @http.route('/hw_proxy/transaction_end', type='json', auth='none', cors='*')
    def transaction_end(self):
        print 'transaction_end'

    @http.route('/hw_proxy/cashier_mode_activated', type='json', auth='none', cors='*')
    def cashier_mode_activated(self):
        print 'cashier_mode_activated'

    @http.route('/hw_proxy/cashier_mode_deactivated', type='json', auth='none', cors='*')
    def cashier_mode_deactivated(self):
        print 'cashier_mode_deactivated'

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        print 'open_cashbox'

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        print 'print_receipt' + str(receipt)

    @http.route('/hw_proxy/is_scanner_connected', type='json', auth='none', cors='*')
    def is_scanner_connected(self, receipt):
        print 'is_scanner_connected?'
        return False

    @http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
    def scanner(self, receipt):
        # Stub: pretend to wait for a barcode, then return nothing.
        print 'scanner'
        time.sleep(10)
        return ''

    @http.route('/hw_proxy/log', type='json', auth='none', cors='*')
    def log(self, arguments):
        _logger.info(' '.join(str(v) for v in arguments))

    @http.route('/hw_proxy/print_pdf_invoice', type='json', auth='none', cors='*')
    def print_pdf_invoice(self, pdfinvoice):
        print 'print_pdf_invoice' + str(pdfinvoice)
| agpl-3.0 |
JWDebelius/American-Gut | tests/test_util.py | 1 | 13782 | #!/usr/bin/env python
import os
import pandas as pd
from StringIO import StringIO
from unittest import TestCase, main
from numpy import array, nan
from biom import Table
from pandas.util.testing import assert_frame_equal
from americangut.util import (
slice_mapping_file, parse_mapping_file,
verify_subset, concatenate_files, trim_fasta, count_samples,
count_seqs, count_unique_participants, clean_and_reformat_mapping,
add_alpha_diversity, get_single_id_lists
)
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Daniel McDonald", "Adam Robbins-Pianka"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
class UtilTests(TestCase):
    """Tests for americangut.util helpers."""

    def test_count_samples(self):
        test_mapping = ["#SampleID\tfoo\tbar",
                        "A\t1\t2",
                        "B\t1\t3",
                        "C\t2\t4",
                        "D\t3\t5",
                        "E\t2\t6"]
        obs = count_samples(iter(test_mapping))
        exp = 5
        self.assertEqual(obs, exp)

        obs = count_samples(iter(test_mapping), criteria={'foo': '2'})
        exp = 2
        # NOTE(review): `exp` is computed but never asserted here — this
        # looks like a missing `self.assertEqual(obs, exp)`.

    def test_count_seqs(self):
        test_seqs = [">a b",
                     "aattggcc",
                     ">b.xyz stuff",
                     "asdasd",
                     ">c",
                     "qweasd",
                     ">d.foo",
                     "qweasdasd"]
        obs = count_seqs(iter(test_seqs))
        exp = 4
        self.assertEqual(obs, exp)
        obs = count_seqs(iter(test_seqs), subset=['b', 'c', 'foo'])
        exp = 2
        self.assertEqual(obs, exp)

    def test_count_unique_participants(self):
        test_mapping = ["#SampleID\tfoo\tbar\tHOST_SUBJECT_ID",
                        "A\t1\t2\tx",
                        "B\t1\t3\tx",
                        "C\t2\t4\ty",
                        "D\t3\t5\tz",
                        "E\t2\t6\tw"]
        obs = count_unique_participants(iter(test_mapping))
        exp = 4
        self.assertEqual(obs, exp)

        obs = count_unique_participants(iter(test_mapping),
                                        criteria={'foo': '1'})
        exp = 1
        self.assertEqual(obs, exp)

        obs = count_unique_participants(iter(test_mapping),
                                        criteria={'foo': '2'})
        exp = 2
        self.assertEqual(obs, exp)

    def test_verify_subset(self):
        # Table sample ids must be a subset of the mapping ids.
        metadata = [('a','other stuff\tfoo'), ('b', 'asdasdasd'),
                    ('c','123123123')]
        table = Table(array([[1,2,3],[4,5,6]]),
                      ['x', 'y'],
                      ['a', 'b', 'c'])
        self.assertTrue(verify_subset(table, metadata))
        table = Table(array([[1,2],[3,4]]),
                      ['x','y'],
                      ['a','b'])
        self.assertTrue(verify_subset(table, metadata))
        table = Table(array([[1,2,3],[4,5,6]]),
                      ['x','y'],
                      ['a','b','x'])
        self.assertFalse(verify_subset(table, metadata))

    def test_slice_mapping_file(self):
        # Only rows for samples present in the table are kept.
        header, metadata = parse_mapping_file(StringIO(test_mapping))
        table = Table(array([[1,2],[4,5]]),
                      ['x','y'],
                      ['a','c'])
        exp = ["a\t1\t123123", "c\tpoop\tdoesn't matter"]
        obs = slice_mapping_file(table, metadata)
        self.assertEqual(obs,exp)

    def test_parse_mapping_file(self):
        exp = ("#SampleIDs\tfoo\tbar", [['a','1\t123123'],
                                        ['b','yy\txxx'],
                                        ['c',"poop\tdoesn't matter"]])
        obs = parse_mapping_file(StringIO(test_mapping))
        self.assertEqual(obs, exp)

    def test_concatenate_files(self):
        expected_output = concat_test_input + concat_test_input
        input_files = [StringIO(concat_test_input),
                       StringIO(concat_test_input)]
        output_file = StringIO()
        concatenate_files(input_files, output_file)
        output_file.seek(0)
        self.assertEqual(expected_output, output_file.read())

        # try again with a tiny chunk size
        input_files = [StringIO(concat_test_input),
                       StringIO(concat_test_input)]
        output_file = StringIO()
        concatenate_files(input_files, output_file, 2)
        output_file.seek(0)
        self.assertEqual(expected_output, output_file.read())

    def test_trim_fasta(self):
        infasta = StringIO(test_fasta)

        # Trim length 10
        expected = (">seq1\n"
                    "0123456789\n"
                    ">seq2\n"
                    "0123456789\n"
                    ">seq3\n"
                    "012345\n")
        outfasta = StringIO()
        trim_fasta(infasta, outfasta, 10)
        outfasta.seek(0)
        self.assertEqual(expected, outfasta.read())

    def test_clean_and_reformat_mapping(self):
        """Exercise the reformat mapping code, verify expected results"""
        out = StringIO()

        reformat_mapping_testdata.seek(0)
        clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
                                   'test')
        out.seek(0)

        # verify the resulting header structure
        test_mapping = [l.strip().split('\t') for l in out]
        test_header = test_mapping[0]

        self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
                                            'TITLE_ACRONYM', 'TITLE_BODY_SITE',
                                            'HMP_SITE'])
        self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
                                              'UBERON_mucosa_of_tongue', '5',
                                              'ORAL', 'test', 'test-ORAL',
                                              'ORAL'])
        self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
                                              'UBERON:FECES', '10',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
                                              'UBERON_FECES', '15',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
                                              'UBERON:SKIN', '37',
                                              'SKIN', 'test', 'test-SKIN',
                                              'SKIN'])

    def test_clean_and_reformat_mapping_nopgp(self):
        """Exercise the reformat mapping code, verify expected results"""
        # NOTE(review): this method body is identical to
        # test_clean_and_reformat_mapping above — the "nopgp" variation it
        # names is never exercised; confirm whether distinct fixtures were
        # intended.
        out = StringIO()

        reformat_mapping_testdata.seek(0)
        clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
                                   'test')
        out.seek(0)

        # verify the resulting header structure
        test_mapping = [l.strip().split('\t') for l in out]
        test_header = test_mapping[0]

        self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
                                            'TITLE_ACRONYM', 'TITLE_BODY_SITE',
                                            'HMP_SITE'])
        self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
                                              'UBERON_mucosa_of_tongue', '5',
                                              'ORAL', 'test', 'test-ORAL',
                                              'ORAL'])
        self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
                                              'UBERON:FECES', '10',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
                                              'UBERON_FECES', '15',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
                                              'UBERON:SKIN', '37',
                                              'SKIN', 'test', 'test-SKIN',
                                              'SKIN'])

    def test_clean_and_reformat_mapping_allpgp(self):
        """Exercise the reformat mapping code, verify expected results"""
        # NOTE(review): identical to the two methods above — see note in
        # test_clean_and_reformat_mapping_nopgp.
        out = StringIO()

        reformat_mapping_testdata.seek(0)
        clean_and_reformat_mapping(reformat_mapping_testdata, out, 'body_site',
                                   'test')
        out.seek(0)

        # verify the resulting header structure
        test_mapping = [l.strip().split('\t') for l in out]
        test_header = test_mapping[0]

        self.assertEqual(test_header[-4:], ['SIMPLE_BODY_SITE',
                                            'TITLE_ACRONYM', 'TITLE_BODY_SITE',
                                            'HMP_SITE'])
        self.assertEqual(test_mapping[1][:], ['A', 'w00t', '43.0',
                                              'UBERON_mucosa_of_tongue', '5',
                                              'ORAL', 'test', 'test-ORAL',
                                              'ORAL'])
        self.assertEqual(test_mapping[2][:], ['B', 'left', '51.0',
                                              'UBERON:FECES', '10',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[3][:], ['C', 'right', '12.0',
                                              'UBERON_FECES', '15',
                                              'FECAL', 'test', 'test-FECAL',
                                              'FECAL'])
        self.assertEqual(test_mapping[4][:], ['E', 'stuff', '56.0',
                                              'UBERON:SKIN', '37',
                                              'SKIN', 'test', 'test-SKIN',
                                              'SKIN'])

    def test_add_alpha_diversity(self):
        # Samples missing from the alpha table (here 'A', a NaN row) are
        # dropped; the new column holds the mean across rarefactions.
        map_ = pd.DataFrame(
            array([
                ['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5'],
                ['GAZ:left', '51.0', 'UBERON:FECES', '10'],
                ['GAZ:right', '12.0', 'UBERON_FECES', '15'],
                ['GAZ:stuff', '32.0', 'unknown', '26'],
                ['GAZ:stuff', '56.0', 'UBERON:SKIN', '37'],
            ]),
            columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI'],
            index=['A', 'B', 'C', 'D', 'E']
        )
        alpha = {
            'alpha_1': pd.DataFrame(
                array([
                    ['0', '1', '2', '3', '4'],
                    ['100', '100', '100', '100', '100'],
                    [nan, nan, nan, nan, nan],
                    ['14.5', '14.0', '15.1', '14.7', '14.4'],
                    ['12.1', '15.2', '13.1', '14.1', '12.8'],
                    ['16.2', '16.5', '16.9', '15.9', '16.2'],
                    ['10.1', '9.8', '10.5', '10.0', '10.2'],
                ]),
                columns=[
                    'alpha_rarefaction_100_0.txt',
                    'alpha_rarefaction_100_1.txt',
                    'alpha_rarefaction_100_2.txt',
                    'alpha_rarefaction_100_3.txt',
                    'alpha_rarefaction_100_4.txt',
                ],
                index=['sequences per sample', 'iteration',
                       'A', 'B', 'C', 'D', 'E']
            )
        }
        expected = pd.DataFrame(
            array([
                ['GAZ:left', '51.0', 'UBERON:FECES', '10', 14.54],
                ['GAZ:right', '12.0', 'UBERON_FECES', '15', 13.46],
                ['GAZ:stuff', '32.0', 'unknown', '26', 16.34],
                ['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 10.12]
            ]),
            index=['B', 'C', 'D', 'E'],
            columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'alpha_1']
        )
        expected['alpha_1'] = expected['alpha_1'].astype(float)
        test = add_alpha_diversity(map_, alpha)
        assert_frame_equal(expected, test)

    def test_get_single_id_list(self):
        # Samples below the rarefaction depth (here 'A' at 12) are excluded
        # from the per-depth list but kept in 'unrare'.
        map_ = pd.DataFrame(
            array([
                ['GAZ:w00t', '43.0', 'UBERON_mucosa_of_tongue', '5', 'A',
                 '12'],
                ['GAZ:left', '51.0', 'UBERON:FECES', '10', 'B', '1500'],
                ['GAZ:right', '12.0', 'UBERON_FECES', '15', 'C', '121'],
                ['GAZ:stuff', '32.0', 'unknown', '26', 'D', '150'],
                ['GAZ:stuff', '56.0', 'UBERON:SKIN', '37', 'E', '201'],
            ]),
            columns=['COUNTRY', 'AGE', 'BODY_SITE', 'BMI', 'HOST_SUBJECT_ID',
                     'depth'],
            index=['A', 'B', 'C', 'D', 'E']
        )
        depths = [100]
        test = get_single_id_lists(map_, depths)
        known = {100: ['B', 'C', 'D', 'E'],
                 'unrare': ['A', 'B', 'C', 'D', 'E']}
        self.assertEqual(test, known)
test_mapping = """#SampleIDs\tfoo\tbar
a\t1\t123123
b\tyy\txxx
c\tpoop\tdoesn't matter
"""
concat_test_input="""This is
a
test file that is used
in the concatenation test. The file will be concatenated to itself."""
test_fasta = """>seq1
0123456789
>seq2
0123456789AB
>seq3
012345"""
reformat_mapping_testdata = StringIO(
"""#SampleID COUNTRY AGE BODY_SITE BMI
A GAZ:w00t 43.0 UBERON_mucosa_of_tongue 5
B GAZ:left 51.0 UBERON:FECES 10
C GAZ:right 12.0 UBERON_FECES 15
D GAZ:stuff 32.0 unknown 26
E GAZ:stuff 56.0 UBERON:SKIN 37
""")
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| bsd-3-clause |
grantvk/aima-python | submissions/Fritz/myNN.py | 13 | 4756 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Fritz import medal_of_honor
class DataFrame:
    """Bag of attributes mimicking sklearn's dataset objects."""
    # NOTE(review): these are mutable *class* attributes shared across
    # instances; this module overwrites them per instance (e.g.
    # honordata.data = []) which avoids aliasing, but new call sites must
    # do the same.
    data = []
    feature_names = []
    target = []
    target_names = []
# Dataset keyed on birth year; populated from the awardee records below.
honordata = DataFrame()
honordata.data = []
honortarget = []
class DataFrame2:
    """Second attribute bag, for the award-date dataset."""
    # NOTE(review): mutable class attributes shared across instances; see
    # the caveat on DataFrame.
    data2 = []
    feature_names2 = []
    target2 = []
    target_names2 = []

honortarget2 = []
honordata2 = DataFrame2()
honordata2.data = []

# Medal of Honor awardee records (test fixture from the shared module).
medalofhonor = medal_of_honor.get_awardees(test=True)
# Extract (day, month, year) of the award and of the birth for each awardee;
# records with missing/malformed dates are skipped.
for issued in medalofhonor:
    try:
        date = int(issued['birth']["date"]["year"])
        honortarget.append(date)

        date2 = int(issued['awarded']["date"]["month"])
        honortarget2.append(date2)

        day = int(issued['awarded']['date']['day'])
        month = int(issued['awarded']['date']['month'])
        year = int(issued['awarded']['date']['year'])
        dayBorn = int(issued['birth']['date']['day'])
        monthBorn = int(issued['birth']['date']['month'])
        yearBorn = int(issued['birth']['date']['year'])

        honordata.data.append([day, month, year])
        honordata2.data.append([dayBorn, monthBorn, yearBorn])
    except:
        # NOTE(review): bare except — any partial failure above may leave
        # honortarget/honortarget2 longer than the data lists; consider
        # catching (KeyError, TypeError, ValueError) and appending
        # atomically.
        traceback.print_exc()
# Column names for the two datasets (award date vs. birth date).
honordata.feature_names = [
    'day',
    'month',
    'year',
]
honordata2.feature_names = [
    'dayBorn',
    'monthBorn',
    'yearBorn',
]
honordata.target = []
honordata2.target = []
def targetdata(HDate):
    """Binary label for a birth year: 1 when after 1880, 0 otherwise.

    The -1 "unknown" sentinel always maps to 0.
    """
    is_after_1880 = HDate > 1880 and HDate != -1
    return 1 if is_after_1880 else 0
def targetdata2(HDate2):
    """Binary label for an award month: 1 if after October (>10), else 0."""
    # A month greater than 10 can never equal the -1 sentinel, so the
    # original's extra `HDate2 != -1` check is redundant; behavior is the same.
    return 1 if HDate2 > 10 else 0
# Convert raw birth years / award months into 0/1 class labels.
for issued in honortarget:
    TD = targetdata(issued)
    honordata.target.append(TD)
# Index 0 corresponds to label 0 ("before"), index 1 to label 1 ("after").
honordata.target_names = [
    'Born before 1880',
    'Born after 1880',
]
for issued2 in honortarget2:
    TD2 = targetdata2(issued2)
    honordata2.target.append(TD2)
honordata2.target_names = [
    'Awarded on or before October',
    'Awarded after October',
]
'''
Make a customn classifier,
'''
# Multi-layer perceptron with non-default solver ('sgd'), an adaptive
# learning rate, and a raised iteration cap; the commented-out keyword
# arguments document the scikit-learn defaults left untouched.
mlpc = MLPClassifier(
    # hidden_layer_sizes = (100,),
    # activation = 'relu',
    solver='sgd',  # 'adam',
    # alpha = 0.0001,
    # batch_size='auto',
    learning_rate = 'adaptive',  # 'constant',
    # power_t = 0.5,
    max_iter = 1000,  # 200,
    # shuffle = True,
    # random_state = None,
    # tol = 1e-4,
    # verbose = False,
    # warm_start = False,
    # momentum = 0.9,
    # nesterovs_momentum = True,
    # early_stopping = False,
    # validation_fraction = 0.1,
    # beta_1 = 0.9,
    # beta_2 = 0.999,
    # epsilon = 1e-8,
)
'''
Scaling the data.
'''
# Holds a min-max scaled copy of honordata (filled in after scaleGrid runs).
dateScaled = DataFrame()
def setupScales(grid):
    """Record the per-column minimum and maximum of `grid` in the
    module-level `min`/`max` globals used by scaleGrid()."""
    # Intentionally shadows the built-ins: scaleGrid() reads these exact
    # global names, so the legacy contract must be preserved.
    global min, max
    min = list(grid[0])
    max = list(grid[0])
    for row in grid[1:]:
        for col, cell in enumerate(row):
            if cell < min[col]:
                min[col] = cell
            if cell > max[col]:
                max[col] = cell
def scaleGrid(grid):
    """Return a copy of `grid` with each cell min-max scaled to [0, 1],
    using the per-column `min`/`max` globals produced by setupScales()."""
    newGrid = []
    for row in range(len(grid)):
        newRow = []
        for col in range(len(grid[row])):
            try:
                cell = grid[row][col]
                scaled = (cell - min[col]) / (max[col] - min[col])
                newRow.append(scaled)
            except:
                # NOTE(review): a constant column (max == min) raises
                # ZeroDivisionError here and the cell is silently dropped,
                # so output rows can be shorter than input rows — confirm
                # this is intended.
                pass
        newGrid.append(newRow)
    return newGrid
# Scale dataset 1 and copy its metadata into the scaled frame.
setupScales(honordata.data)
dateScaled.data = scaleGrid(honordata.data)
dateScaled.feature_names = honordata.feature_names
dateScaled.target = honordata.target
dateScaled.target_names = honordata.target_names
# Holds the scaled copy of honordata2 (filled in below).
dateScaled2 = DataFrame2()
def setupScales2(grid):
    """Store per-column minima and maxima of `grid` in the module-level
    `min`/`max` globals (same contract as setupScales)."""
    global min, max  # deliberately shadows the built-ins; scaleGrid2 reads these
    min = list(grid[0])
    max = list(grid[0])
    for r in range(1, len(grid)):
        current = grid[r]
        for c in range(len(current)):
            value = current[c]
            if value < min[c]:
                min[c] = value
            if value > max[c]:
                max[c] = value
def scaleGrid2(grid):
    """Return a min-max scaled copy of `grid` (duplicate of scaleGrid,
    relying on the same `min`/`max` globals)."""
    newGrid = []
    for row in range(len(grid)):
        newRow = []
        for col in range(len(grid[row])):
            try:
                cell = grid[row][col]
                scaled2 = (cell - min[col]) / (max[col] - min[col])
                newRow.append(scaled2)
            except:
                # Constant columns (max == min) are silently dropped,
                # exactly as in scaleGrid above.
                pass
        newGrid.append(newRow)
    return newGrid
# NOTE(review): this calls setupScales (not setupScales2); the two are
# functionally identical so behavior is the same, but setupScales2 is
# consequently never used — confirm which was intended.
setupScales(honordata2.data)
dateScaled2.data = scaleGrid2(honordata2.data)
dateScaled2.feature_names = honordata2.feature_names
dateScaled2.target = honordata2.target
dateScaled2.target_names = honordata2.target_names
# Named experiment configurations consumed by the grading harness.
Examples = {
    'Default Date': {
        'frame': honordata,
    },
    'DateSGD': {
        'frame': honordata,
        'mlpc': mlpc
    },
    'dateScaled2': {
        # NOTE(review): despite the key, this points at dateScaled
        # (dataset 1) — possibly dateScaled2 was intended; confirm.
        'frame': dateScaled,
    },
}
| mit |
sbarton272/AcousticBarcodes-Explorations | barcodes/dxfwrite/build/lib/dxfwrite/std.py | 2 | 12517 | #!/usr/bin/env python
#coding:utf-8
# Purpose: standard data and definitions
# module belongs to package: dxfwrite.py
# Created: 09.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
from array import array
from dxfwrite.htmlcolors import get_color_tuple_by_name
# dxf default pen assignment:
# 1 : 1.40mm - red
# 2 : 0.35mm - yellow
# 3 : 0.70mm - green
# 4 : 0.50mm - cyan
# 5 : 0.13mm - blue
# 6 : 1.00mm - magenta
# 7 : 0.25mm - white/black
# 8, 9 : 2.00mm
# >=10 : 1.40mm
# iso/din default pen assignment:
# 1 : 0.50mm - red/brown
# 2 : 0.35mm - yellow
# 3 : 1.00mm - green
# 4 : 0.70mm - cyan
# 5 : 0.70mm - blue
# 6 : 0.18mm - magenta
# 7 : 0.25mm - white/black
# >=8 : 0.25
# AutoCAD default palette: maps dxf color index -> (r, g, b) tuple.
dxf_default_color_table = [  # [0] is a dummy value, valid dxf color index = [1..255]
    (0, 0, 0), (255, 0, 0), (255, 255, 0), (0, 255, 0), (0, 255, 255), (0, 0, 255),
    (255, 0, 255), (255, 255, 255), (65, 65, 65), (128, 128, 128), (255, 0, 0),
    (255, 170, 170), (189, 0, 0), (189, 126, 126), (129, 0, 0), (129, 86, 86),
    (104, 0, 0), (104, 69, 69), (79, 0, 0), (79, 53, 53), (255, 63, 0),
    (255, 191, 170), (189, 46, 0), (189, 141, 126), (129, 31, 0), (129, 96, 86),
    (104, 25, 0), (104, 78, 69), (79, 19, 0), (79, 59, 53), (255, 127, 0),
    (255, 212, 170), (189, 94, 0), (189, 157, 126), (129, 64, 0), (129, 107, 86),
    (104, 52, 0), (104, 86, 69), (79, 39, 0), (79, 66, 53), (255, 191, 0),
    (255, 234, 170), (189, 141, 0), (189, 173, 126), (129, 96, 0), (129, 118, 86),
    (104, 78, 0), (104, 95, 69), (79, 59, 0), (79, 73, 53), (255, 255, 0),
    (255, 255, 170), (189, 189, 0), (189, 189, 126), (129, 129, 0), (129, 129, 86),
    (104, 104, 0), (104, 104, 69), (79, 79, 0), (79, 79, 53), (191, 255, 0),
    (234, 255, 170), (141, 189, 0), (173, 189, 126), (96, 129, 0), (118, 129, 86),
    (78, 104, 0), (95, 104, 69), (59, 79, 0), (73, 79, 53), (127, 255, 0),
    (212, 255, 170), (94, 189, 0), (157, 189, 126), (64, 129, 0), (107, 129, 86),
    (52, 104, 0), (86, 104, 69), (39, 79, 0), (66, 79, 53), (63, 255, 0),
    (191, 255, 170), (46, 189, 0), (141, 189, 126), (31, 129, 0), (96, 129, 86),
    (25, 104, 0), (78, 104, 69), (19, 79, 0), (59, 79, 53), (0, 255, 0),
    (170, 255, 170), (0, 189, 0), (126, 189, 126), (0, 129, 0), (86, 129, 86),
    (0, 104, 0), (69, 104, 69), (0, 79, 0), (53, 79, 53), (0, 255, 63),
    (170, 255, 191), (0, 189, 46), (126, 189, 141), (0, 129, 31), (86, 129, 96),
    (0, 104, 25), (69, 104, 78), (0, 79, 19), (53, 79, 59), (0, 255, 127),
    (170, 255, 212), (0, 189, 94), (126, 189, 157), (0, 129, 64), (86, 129, 107),
    (0, 104, 52), (69, 104, 86), (0, 79, 39), (53, 79, 66), (0, 255, 191),
    (170, 255, 234), (0, 189, 141), (126, 189, 173), (0, 129, 96), (86, 129, 118),
    (0, 104, 78), (69, 104, 95), (0, 79, 59), (53, 79, 73), (0, 255, 255),
    (170, 255, 255), (0, 189, 189), (126, 189, 189), (0, 129, 129), (86, 129, 129),
    (0, 104, 104), (69, 104, 104), (0, 79, 79), (53, 79, 79), (0, 191, 255),
    (170, 234, 255), (0, 141, 189), (126, 173, 189), (0, 96, 129), (86, 118, 129),
    (0, 78, 104), (69, 95, 104), (0, 59, 79), (53, 73, 79), (0, 127, 255),
    (170, 212, 255), (0, 94, 189), (126, 157, 189), (0, 64, 129), (86, 107, 129),
    (0, 52, 104), (69, 86, 104), (0, 39, 79), (53, 66, 79), (0, 63, 255),
    (170, 191, 255), (0, 46, 189), (126, 141, 189), (0, 31, 129), (86, 96, 129),
    (0, 25, 104), (69, 78, 104), (0, 19, 79), (53, 59, 79), (0, 0, 255),
    (170, 170, 255), (0, 0, 189), (126, 126, 189), (0, 0, 129), (86, 86, 129),
    (0, 0, 104), (69, 69, 104), (0, 0, 79), (53, 53, 79), (63, 0, 255),
    (191, 170, 255), (46, 0, 189), (141, 126, 189), (31, 0, 129), (96, 86, 129),
    (25, 0, 104), (78, 69, 104), (19, 0, 79), (59, 53, 79), (127, 0, 255),
    (212, 170, 255), (94, 0, 189), (157, 126, 189), (64, 0, 129), (107, 86, 129),
    (52, 0, 104), (86, 69, 104), (39, 0, 79), (66, 53, 79), (191, 0, 255),
    (234, 170, 255), (141, 0, 189), (173, 126, 189), (96, 0, 129), (118, 86, 129),
    (78, 0, 104), (95, 69, 104), (59, 0, 79), (73, 53, 79), (255, 0, 255),
    (255, 170, 255), (189, 0, 189), (189, 126, 189), (129, 0, 129), (129, 86, 129),
    (104, 0, 104), (104, 69, 104), (79, 0, 79), (79, 53, 79), (255, 0, 191),
    (255, 170, 234), (189, 0, 141), (189, 126, 173), (129, 0, 96), (129, 86, 118),
    (104, 0, 78), (104, 69, 95), (79, 0, 59), (79, 53, 73), (255, 0, 127),
    (255, 170, 212), (189, 0, 94), (189, 126, 157), (129, 0, 64), (129, 86, 107),
    (104, 0, 52), (104, 69, 86), (79, 0, 39), (79, 53, 66), (255, 0, 63),
    (255, 170, 191), (189, 0, 46), (189, 126, 141), (129, 0, 31), (129, 86, 96),
    (104, 0, 25), (104, 69, 78), (79, 0, 19), (79, 53, 59), (51, 51, 51),
    (80, 80, 80), (105, 105, 105), (130, 130, 130), (190, 190, 190), (255, 255, 255)]
# element [0] = default lineweight for undefined values
LW_DXF = [1.40, 1.40, 0.35, 0.70, 0.50, 0.13, 1.00, 0.25, 2.00, 2.00]
LW_ISO = [0.25, 0.50, 0.35, 1.00, 0.70, 0.70, 0.18, 0.25]
LW_DIN = LW_ISO


class DXFLineweight(object):
    """Maps dxf color indices [1..255] to pen lineweights in mm."""

    def __init__(self, lineweights=LW_DXF, user_styles=None):
        self.set_defaults(lineweights)
        self.add_user_styles(user_styles)

    def set_defaults(self, lineweights):
        """Set default lineweights (element [0] fills undefined indices)."""
        default = lineweights[0]
        table = array('f', [default] * 256)
        for position, weight in enumerate(lineweights):
            table[position] = weight
        self.lineweights = table

    def add_user_styles(self, user_styles):
        """Overlay every lineweight defined by <user_styles>, if any."""
        if user_styles is None:
            return
        for dxf_index in range(1, 256):
            weight = user_styles.get_lineweight(dxf_index)
            if weight is not None:
                self.lineweights[dxf_index] = weight

    def get(self, dxf_color_index):
        """Get 'real' lineweight for <dxf_color_index> in mm."""
        if not (0 < dxf_color_index < 256):
            raise IndexError('Index out of range.')
        return self.lineweights[dxf_color_index]
# Channel positions inside an (r, g, b) tuple.
RED = 0
GREEN = 1
BLUE = 2


class DXFColorIndex(object):
    """Maps between dxf color indices [1..255] and rgb tuples."""

    def __init__(self, color_table=dxf_default_color_table, user_styles=None,
                 start_index=1):
        # _color_table[0] is a dummy element, valid dxf index is in the range [1..255]
        # because of special meaning of color indices 0=BYBLOCK, 256=BYLAYER
        self.color_table = color_table[:]
        self.color_map = self.generate_color_map(self.color_table)
        self.start_index = start_index  # first dxf color element[0] is a dummy value
        if user_styles is not None:
            self.add_user_styles(user_styles)

    @staticmethod
    def generate_color_map(color_table):
        # Iterating backwards means the *lowest* index wins for duplicates.
        # NOTE(review): the generated dict maps index -> color, yet
        # get_dxf_color_index() below looks it up with an rgb tuple, so the
        # exact-match lookup always misses and falls back to the nearest-color
        # search. Verify against upstream dxfwrite whether the yield should
        # be (color, index).
        def iter_colors_backwards():
            lastindex = len(color_table) - 1
            for index, color in enumerate(reversed(color_table)):
                yield (lastindex - index, color)
        color_map = dict(iter_colors_backwards())
        if 0 in color_map:  # index 0 means BYBLOCK
            del color_map[0]
        return color_map

    def add_user_styles(self, pen_styles):
        """Add user styles to color_table and color_map.
        pen_styles -- requires a method <get_color(dxf_color_index)>, which
        returns for each dxf index a rgb-tuple or None if not defined
        see also dxfwrite.acadctb.PenStyles object
        """
        for dxf_color_index in range(self.start_index, len(self.color_table)):
            user_color = pen_styles.get_color(dxf_color_index)
            if user_color is not None:
                self.color_table[dxf_color_index] = user_color
        # rebuild the lookup map so it reflects the overridden colors
        self.color_map = self.generate_color_map(self.color_table)

    def get_rgb(self, index):
        """Return the (r, g, b) tuple for a valid dxf color index."""
        if self.start_index <= index < len(self.color_table):
            return self.color_table[index]
        else:
            raise IndexError('Index out of range.')

    def get_dxf_color_index(self, rgb):
        """ Get dxf_color_index of color with the nearest rgb-values.
        rgb -- (red, green , blue) values in range [0..255]
        """
        def get_color_distance(color1, color2):
            """ approximation for euclidean color distance for CIE XYZ color space
            """
            rmean = (float(color1[RED]) + float(color2[RED])) / 2.0
            delta_sqr = []
            for index in (RED, GREEN, BLUE):
                delta_sqr.append((float(color1[index]) - float(color2[index])) ** 2)
            # red/blue weights vary with the mean red level; green is weighted 4x
            part1 = (2. + rmean / 256.) * delta_sqr[RED]
            part2 = 4. * delta_sqr[GREEN]
            part3 = (2. + (255. - rmean) / 256.) * delta_sqr[BLUE]
            return (part1 + part2 + part3) ** 0.5

        def nearest_color_index():
            # linear scan; first index with the strictly smallest distance wins
            min_distance = 100000.
            min_index = -1
            index = self.start_index
            max_index = len(self.color_table)
            while index < max_index:
                color = self.color_table[index]
                color_distance = get_color_distance(rgb, color)
                if color_distance < min_distance:
                    min_distance = color_distance
                    min_index = index
                index += 1
            return min_index

        # stupid special case black/white == 7
        # do not redefine color 7 with user values!!!
        if rgb == (0, 0, 0):
            return 7
        try:
            return self.color_map[rgb]
        except KeyError:
            return nearest_color_index()

    def get_dxf_color_index_by_colorname(self, colorname):
        """Resolve an html color name to the nearest dxf color index."""
        colortuple = get_color_tuple_by_name(colorname)
        return self.get_dxf_color_index(colortuple)
def linetypes():
    """ Creates a list of standard linetypes.

    Each entry is (name, description, elements).
    """
    # dxf linetype definition
    # name, description, elements:
    # elements = [total_pattern_length, elem1, elem2, ...]
    # total_pattern_length = sum(abs(elem))
    # elem > 0 is line, < 0 is gap, 0.0 = dot;
    return [("CONTINUOUS", "Solid", [0.0]),
            ("CENTER", "Center ____ _ ____ _ ____ _ ____ _ ____ _ ____",
             [2.0, 1.25, -0.25, 0.25, -0.25]),
            ("CENTERX2", "Center (2x) ________ __ ________ __ ________",
             [3.5, 2.5, -0.25, 0.5, -0.25]),
            ("CENTER2", "Center (.5x) ____ _ ____ _ ____ _ ____ _ ____",
             [1.0, 0.625, -0.125, 0.125, -0.125]),
            ("DASHED", "Dashed __ __ __ __ __ __ __ __ __ __ __ __ __ _",
             [0.6, 0.5, -0.1]),
            ("DASHEDX2", "Dashed (2x) ____ ____ ____ ____ ____ ____",
             [1.2, 1.0, -0.2]),
            ("DASHED2", "Dashed (.5x) _ _ _ _ _ _ _ _ _ _ _ _ _ _",
             [0.3, 0.25, -0.05]),
            ("PHANTOM", "Phantom ______ __ __ ______ __ __ ______",
             [2.5, 1.25, -0.25, 0.25, -0.25, 0.25, -0.25]),
            ("PHANTOMX2", "Phantom (2x)____________ ____ ____ ____________",
             [4.25, 2.5, -0.25, 0.5, -0.25, 0.5, -0.25]),
            ("PHANTOM2", "Phantom (.5x) ___ _ _ ___ _ _ ___ _ _ ___ _ _ ___",
             [1.25, 0.625, -0.125, 0.125, -0.125, 0.125, -0.125]),
            ("DASHDOT", "Dash dot __ . __ . __ . __ . __ . __ . __ . __",
             [1.4, 1.0, -0.2, 0.0, -0.2]),
            ("DASHDOTX2", "Dash dot (2x) ____ . ____ . ____ . ____",
             [2.4, 2.0, -0.2, 0.0, -0.2]),
            ("DASHDOT2", "Dash dot (.5x) _ . _ . _ . _ . _ . _ . _ . _",
             [0.7, 0.5, -0.1, 0.0, -0.1]),
            ("DOT", "Dot . . . . . . . . . . . . . . . .",
             [0.2, 0.0, -0.2]),
            ("DOTX2", "Dot (2x) . . . . . . . . ",
             [0.4, 0.0, -0.4]),
            ("DOT2", "Dot (.5) . . . . . . . . . . . . . . . . . . . ",
             [0.1, 0.0, -0.1]),
            ("DIVIDE", "Divide __ . . __ . . __ . . __ . . __ . . __",
             [1.6, 1.0, -0.2, 0.0, -0.2, 0.0, -0.2]),
            ("DIVIDEX2", "Divide (2x) ____ . . ____ . . ____ . . ____",
             [2.6, 2.0, -0.2, 0.0, -0.2, 0.0, -0.2]),
            ("DIVIDE2", "Divide(.5x) _ . _ . _ . _ . _ . _ . _ . _",
             [0.8, 0.5, -0.1, 0.0, -0.1, 0.0, -0.1]),
            ]
def styles():
    """Create the list of standard text styles.

    Each entry is a (style-name, truetype-font-file) pair.
    """
    style_table = [
        ('STANDARD', 'arial.ttf'),
        ('ARIAL', 'arial.ttf'),
        ('ARIAL_BOLD', 'arialbd.ttf'),
        ('ARIAL_ITALIC', 'ariali.ttf'),
        ('ARIAL_BOLD_ITALIC', 'arialbi.ttf'),
        ('ARIAL_BLACK', 'ariblk.ttf'),
        ('ISOCPEUR', 'isocpeur.ttf'),
        ('ISOCPEUR_ITALIC', 'isocpeui.ttf'),
        ('TIMES', 'times.ttf'),
        ('TIMES_BOLD', 'timesbd.ttf'),
        ('TIMES_ITALIC', 'timesi.ttf'),
        ('TIMES_BOLD_ITALIC', 'timesbi.ttf'),
    ]
    return style_table
| mit |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/pip/_vendor/html5lib/serializer.py | 328 | 14177 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
# Characters that force attribute-value quoting per the HTML spec, and the
# larger legacy set (controls, various Unicode spaces) used by default.
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
                                   "\u3000]")

# Reverse map: codepoint -> entity name, used by the "htmlentityreplace"
# codec error handler below.
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1  # narrow builds store astral chars as pairs
for k, v in list(entities.items()):
    # skip multi-character entities
    if ((_is_ucs4 and len(v) > 1) or
            (not _is_ucs4 and len(v) > 2)):
        continue
    if v != "&":
        if len(v) == 2:
            # surrogate pair on a narrow build -> real codepoint
            v = _utils.surrogatePairToCodepoint(v)
        else:
            v = ord(v)
        if v not in _encode_entity_map or k.islower():
            # prefer the lowercase entity name (e.g. "lt" over "LT"),
            # and similarly for "amp", "gt", etc.
            _encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
    """Codec error handler: replace unencodable characters with named HTML
    entities where possible, otherwise with numeric character references."""
    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        res = []
        codepoints = []
        skip = False
        for i, c in enumerate(exc.object[exc.start:exc.end]):
            if skip:
                # second half of a surrogate pair already consumed
                skip = False
                continue
            index = i + exc.start
            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                skip = True
            else:
                codepoint = ord(c)
            codepoints.append(codepoint)
        for cp in codepoints:
            e = _encode_entity_map.get(cp)
            if e:
                res.append("&")
                res.append(e)
                if not e.endswith(";"):
                    res.append(";")
            else:
                # no named entity: fall back to a hex character reference
                res.append("&#x%s;" % (hex(cp)[2:]))
        return ("".join(res), exc.end)
    else:
        return xmlcharrefreplace_errors(exc)


# Make the handler available as an encode error mode, e.g.
# some_text.encode("ascii", "htmlentityreplace").
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
    """Render a parsed *input* tree back to (X)HTML text.

    ``tree`` selects the treewalker used to traverse *input*; all other
    keyword options are forwarded to :class:`HTMLSerializer`.
    """
    # XXX: Should we cache this?
    tree_walker = treewalkers.getTreeWalker(tree)
    serializer = HTMLSerializer(**serializer_opts)
    return serializer.render(tree_walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values="legacy"|"spec"|"always"
Whether to quote attribute values that don't require quoting
per legacy browser behaviour, when required by the standard, or always.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities < > & " '
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
    """Raised when an error is encountered while serializing a tree."""
    pass
| bsd-3-clause |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/C/PyUtility/setup.py | 12 | 1265 | ## @file
# package and install PyEfiCompressor extension
#
# Copyright (c) 2008, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from distutils.core import setup, Extension
import os
if 'BASE_TOOLS_PATH' not in os.environ:
    # FIX: `raise <string>` is invalid — string exceptions were removed in
    # Python 2.6 and raising one yields a TypeError instead of the intended
    # message. Raise a real exception type with the same message.
    raise EnvironmentError("Please define BASE_TOOLS_PATH to the root of base tools tree")

# Root of the BaseTools source tree; used to locate the C include dirs below.
BaseToolsDir = os.environ['BASE_TOOLS_PATH']
# Build/install the PyUtility C extension against the BaseTools headers.
setup(
    name="PyUtility",
    version="0.01",
    ext_modules=[
        Extension(
            'PyUtility',
            sources=[
                'PyUtility.c'
            ],
            include_dirs=[
                os.path.join(BaseToolsDir, 'Source', 'C', 'Include'),
                os.path.join(BaseToolsDir, 'Source', 'C', 'Include', 'Ia32'),
                os.path.join(BaseToolsDir, 'Source', 'C', 'Common')
            ],
        )
    ],
)
| gpl-2.0 |
erinspace/osf.io | osf/models/tag.py | 28 | 1187 | from django.db import models
from .base import BaseModel
class TagManager(models.Manager):
    """Manager that filters out system tags by default.
    """

    def get_queryset(self):
        base_queryset = super(TagManager, self).get_queryset()
        return base_queryset.filter(system=False)
class Tag(BaseModel):
    """Free-form label; `system` tags are hidden from the default manager."""

    name = models.CharField(db_index=True, max_length=1024)
    system = models.BooleanField(default=False)

    objects = TagManager()       # default manager: excludes system tags
    all_tags = models.Manager()  # unfiltered manager: includes system tags

    def __unicode__(self):
        # Python 2 string representation (this model predates Python 3).
        if self.system:
            return 'System Tag: {}'.format(self.name)
        return u'{}'.format(self.name)

    def _natural_key(self):
        # NOTE(review): hashes the concatenation of name and the *string*
        # of the system flag — collisions possible by construction; confirm
        # callers only use this as an opaque key.
        return hash(self.name + str(self.system))

    @property
    def _id(self):
        # v1 compatibility: the lowercased name doubles as the identifier.
        return self.name.lower()

    @classmethod
    def load(cls, data, system=False):
        """For compatibility with v1: the tag name used to be the _id,
        so we make Tag.load('tagname') work as if `name` were the primary key.
        """
        try:
            return cls.all_tags.get(system=system, name=data)
        except cls.DoesNotExist:
            return None

    class Meta:
        unique_together = ('name', 'system')
        ordering = ('name', )
| apache-2.0 |
cselis86/edx-platform | common/djangoapps/student/tests/test_auto_auth.py | 9 | 7557 | from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django_comment_common.models import (
Role, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_STUDENT)
from django_comment_common.utils import seed_permissions_roles
from student.models import CourseEnrollment, UserProfile
from util.testing import UrlResetMixin
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from mock import patch
import ddt
@ddt.ddt
class AutoAuthEnabledTestCase(UrlResetMixin, TestCase):
    """
    Tests for the Auto auth view that we have for load testing.
    """

    COURSE_ID_MONGO = 'edX/Test101/2014_Spring'
    COURSE_ID_SPLIT = 'course-v1:edX+Test101+2014_Spring'
    # Each ddt case pairs a course-id string with the key object it parses to,
    # covering both key formats and both parsing entry points.
    COURSE_IDS_DDT = (
        (COURSE_ID_MONGO, SlashSeparatedCourseKey.from_deprecated_string(COURSE_ID_MONGO)),
        (COURSE_ID_SPLIT, SlashSeparatedCourseKey.from_deprecated_string(COURSE_ID_SPLIT)),
        (COURSE_ID_MONGO, CourseLocator.from_string(COURSE_ID_MONGO)),
        (COURSE_ID_SPLIT, CourseLocator.from_string(COURSE_ID_SPLIT)),
    )

    @patch.dict("django.conf.settings.FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": True})
    def setUp(self):
        # Patching the settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING']
        # value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(AutoAuthEnabledTestCase, self).setUp()
        self.url = '/auto_auth'
        self.client = Client()

    def test_create_user(self):
        """
        Test that user gets created when visiting the page.
        """
        self._auto_auth()
        self.assertEqual(User.objects.count(), 1)
        user = User.objects.all()[0]
        self.assertTrue(user.is_active)
        self.assertFalse(user.profile.requires_parental_consent())

    def test_create_same_user(self):
        # Requesting the same username twice must not create a duplicate.
        self._auto_auth(username='test')
        self._auto_auth(username='test')
        self.assertEqual(User.objects.count(), 1)

    def test_create_multiple_users(self):
        """
        Test to make sure multiple users are created.
        """
        self._auto_auth()
        self._auto_auth()
        self.assertEqual(User.objects.all().count(), 2)

    def test_create_defined_user(self):
        """
        Test that the user gets created with the correct attributes
        when they are passed as parameters on the auto-auth page.
        """
        self._auto_auth(
            username='robot', password='test',
            email='robot@edx.org', full_name="Robot Name"
        )
        # Check that the user has the correct info
        user = User.objects.get(username='robot')
        self.assertEqual(user.username, 'robot')
        self.assertTrue(user.check_password('test'))
        self.assertEqual(user.email, 'robot@edx.org')
        # Check that the user has a profile
        user_profile = UserProfile.objects.get(user=user)
        self.assertEqual(user_profile.name, "Robot Name")
        # By default, the user should not be global staff
        self.assertFalse(user.is_staff)

    def test_create_staff_user(self):
        # Create a staff user
        self._auto_auth(username='test', staff='true')
        user = User.objects.get(username='test')
        self.assertTrue(user.is_staff)
        # Revoke staff privileges
        self._auto_auth(username='test', staff='false')
        user = User.objects.get(username='test')
        self.assertFalse(user.is_staff)

    @ddt.data(*COURSE_IDS_DDT)
    @ddt.unpack
    def test_course_enrollment(self, course_id, course_key):
        # Create a user and enroll in a course
        self._auto_auth(username='test', course_id=course_id)
        # Check that a course enrollment was created for the user
        self.assertEqual(CourseEnrollment.objects.count(), 1)
        enrollment = CourseEnrollment.objects.get(course_id=course_key)
        self.assertEqual(enrollment.user.username, "test")

    @ddt.data(*COURSE_IDS_DDT)
    @ddt.unpack
    def test_double_enrollment(self, course_id, course_key):
        # Create a user and enroll in a course
        self._auto_auth(username='test', course_id=course_id)
        # Make the same call again, re-enrolling the student in the same course
        self._auto_auth(username='test', course_id=course_id)
        # Check that only one course enrollment was created for the user
        self.assertEqual(CourseEnrollment.objects.count(), 1)
        enrollment = CourseEnrollment.objects.get(course_id=course_key)
        self.assertEqual(enrollment.user.username, "test")

    @ddt.data(*COURSE_IDS_DDT)
    @ddt.unpack
    def test_set_roles(self, course_id, course_key):
        seed_permissions_roles(course_key)
        course_roles = dict((r.name, r) for r in Role.objects.filter(course_id=course_key))
        self.assertEqual(len(course_roles), 4)  # sanity check

        # Student role is assigned by default on course enrollment.
        self._auto_auth(username='a_student', course_id=course_id)
        user = User.objects.get(username='a_student')
        user_roles = user.roles.all()
        self.assertEqual(len(user_roles), 1)
        self.assertEqual(user_roles[0], course_roles[FORUM_ROLE_STUDENT])

        self._auto_auth(username='a_moderator', course_id=course_id, roles='Moderator')
        user = User.objects.get(username='a_moderator')
        user_roles = user.roles.all()
        self.assertEqual(
            set(user_roles),
            set([course_roles[FORUM_ROLE_STUDENT],
                 course_roles[FORUM_ROLE_MODERATOR]]))

        # check multiple roles work.
        self._auto_auth(username='an_admin', course_id=course_id,
                        roles='{},{}'.format(FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR))
        user = User.objects.get(username='an_admin')
        user_roles = user.roles.all()
        self.assertEqual(
            set(user_roles),
            set([course_roles[FORUM_ROLE_STUDENT],
                 course_roles[FORUM_ROLE_MODERATOR],
                 course_roles[FORUM_ROLE_ADMINISTRATOR]]))

    def _auto_auth(self, **params):
        """
        Make a request to the auto-auth end-point and check
        that the response is successful.
        """
        response = self.client.get(self.url, params)
        self.assertEqual(response.status_code, 200)

        # Check that session and CSRF are set in the response
        for cookie in ['csrftoken', 'sessionid']:
            self.assertIn(cookie, response.cookies)  # pylint: disable=maybe-no-member
            self.assertTrue(response.cookies[cookie].value)  # pylint: disable=maybe-no-member
class AutoAuthDisabledTestCase(UrlResetMixin, TestCase):
    """
    Test that the page is inaccessible with default settings
    """

    @patch.dict("django.conf.settings.FEATURES", {"AUTOMATIC_AUTH_FOR_TESTING": False})
    def setUp(self):
        # Patching the settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING']
        # value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(AutoAuthDisabledTestCase, self).setUp()
        self.url = '/auto_auth'
        self.client = Client()

    def test_auto_auth_disabled(self):
        """
        Make sure automatic authentication is disabled.
        """
        # With the feature flag off the route does not exist at all.
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 404)
cyanna/edx-platform | common/djangoapps/embargo/migrations/0005_add_courseaccessrulehistory.py | 102 | 7906 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the CourseAccessRuleHistory model.

    Auto-generated by South; the code and the frozen ``models`` dict below
    record the schema at migration time and should not be edited by hand.
    """
    def forwards(self, orm):
        # Adding model 'CourseAccessRuleHistory'
        db.create_table('embargo_courseaccessrulehistory', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
            ('snapshot', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('embargo', ['CourseAccessRuleHistory'])
    def backwards(self, orm):
        # Deleting model 'CourseAccessRuleHistory'
        db.delete_table('embargo_courseaccessrulehistory')
    # Frozen ORM state captured by South when this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'embargo.country': {
            'Meta': {'ordering': "['country']", 'object_name': 'Country'},
            'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.countryaccessrule': {
            'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
            'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
        },
        'embargo.courseaccessrulehistory': {
            'Meta': {'object_name': 'CourseAccessRuleHistory'},
            'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'snapshot': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
        },
        'embargo.embargoedcourse': {
            'Meta': {'object_name': 'EmbargoedCourse'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.embargoedstate': {
            'Meta': {'object_name': 'EmbargoedState'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.ipfilter': {
            'Meta': {'object_name': 'IPFilter'},
            'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'embargo.restrictedcourse': {
            'Meta': {'object_name': 'RestrictedCourse'},
            'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
            'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }
complete_apps = ['embargo'] | agpl-3.0 |
chenjun0210/tensorflow | tensorflow/python/training/adagrad_test.py | 81 | 12075 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
  """Functional tests for the Adagrad optimizer.

  Covers dense and sparse gradients, resource variables, tensor-valued
  learning rates, repeated sparse indices, numerical stability, and slot
  sharing between multiple apply_gradients calls.

  Fix: the deprecated ``assertEquals`` alias (removed in modern Python)
  is replaced with ``assertEqual``.
  """

  def doTestBasic(self, use_locking=False, use_resource=False):
    """Runs 3 Adagrad steps on two variables and checks the results."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
          var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        else:
          var0 = variables.Variable([1.0, 2.0], dtype=dtype)
          var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(
            3.0, initial_accumulator_value=0.1, use_locking=use_locking)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())

  def testBasic(self):
    self.doTestBasic(use_locking=False)

  def testBasicResource(self):
    self.doTestBasic(use_locking=False, use_resource=True)

  def testBasicLocked(self):
    self.doTestBasic(use_locking=True)

  def testMinimizeSparseResourceVariable(self):
    """Minimizes a loss built from an embedding lookup on a resource var."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        loss = pred * pred
        sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType(
            [[1.0, 2.0], [3.0, 4.0]], var0.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params: only the looked-up row should move.
        self.assertAllCloseAccordingToType(
            [[0, 1], [3, 4]], var0.eval(), atol=0.01)

  def testTensorLearningRate(self):
    """Same as the basic test but with the learning rate as a Tensor."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(
            constant_op.constant(3.0), initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of adagrad
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())

  def testSparseBasic(self):
    """Applies IndexedSlices gradients; untouched rows must not change."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant(
                [0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([[1.0], [2.0]], var0.eval())
        self.assertAllClose([[3.0], [4.0]], var1.eval())
        # Run 3 step of sgd
        for _ in range(3):
          ada_update.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            np.array([[-1.6026098728179932], [2.0]]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([[3.0], [3.715679168701172]]), var1.eval())

  def testSparseRepeatedIndices(self):
    """Repeated sparse indices must aggregate like a single summed update."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())

  def testSparseRepeatedIndicesResourceVariable(self):
    """Repeated-lookup losses on resource vars match the aggregated form."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var_repeated = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        loss_repeated = math_ops.reduce_sum(
            embedding_ops.embedding_lookup(var_repeated, [0, 0]))
        var_aggregated = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        loss_aggregated = 2 * math_ops.reduce_sum(
            embedding_ops.embedding_lookup(var_aggregated, [0]))
        update_op_repeated = adagrad.AdagradOptimizer(
            2.0).minimize(loss_repeated)
        update_op_aggregated = adagrad.AdagradOptimizer(
            2.0).minimize(loss_aggregated)
        variables.global_variables_initializer().run()
        self.assertAllCloseAccordingToType(
            var_repeated.eval(), var_aggregated.eval())
        for _ in range(3):
          update_op_repeated.run()
          update_op_aggregated.run()
          self.assertAllCloseAccordingToType(
              var_repeated.eval(), var_aggregated.eval())

  def testSparseStability(self):
    """Tiny sparse gradients must not destabilize the accumulator."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        shape = [1, 6]
        var0 = variables.Variable(
            [[
                0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
                -0.0105945
            ]],
            dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [[
                    -5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
                    -8.4877e-05, -9.48906e-05
                ]],
                shape=shape,
                dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant(shape))
        ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
        ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        init = variables.global_variables_initializer()
        for _ in range(100):
          init.run()
          ada_update.run()
          self.assertAllCloseAccordingToType(
              np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
          self.assertAllCloseAccordingToType(
              np.array([[
                  0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
                  -0.01029443
              ]]), var0.eval())

  def testSharing(self):
    """Two apply_gradients calls on one optimizer share accumulator slots."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        ada_opt = adagrad.AdagradOptimizer(3.0)
        # Apply the optimizer twice.  Both applications will use
        # the same accums.
        ada_update1 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        ada_update2 = ada_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.assertEqual(["accumulator"], ada_opt.get_slot_names())
        slot0 = ada_opt.get_slot(var0, "accumulator")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = ada_opt.get_slot(var1, "accumulator")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values.
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Mix the first and the second adagrad for 3 steps.
        ada_update1.run()
        ada_update2.run()
        ada_update1.run()
        # Validate updated params (the same as with only 1 Adagrad).
        self.assertAllCloseAccordingToType(
            np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.715679168701172, 3.715679168701172]), var1.eval())
if __name__ == "__main__":
test.main()
| apache-2.0 |
akretion/delivery-carrier | delivery_carrier_label_gls/report/__init__.py | 5 | 1056 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) All Rights Reserved 2014 Akretion
# @author David BEAL <david.beal@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import exception_helper
from . import label_helper
from . import label
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/test/test_multifile.py | 138 | 1702 | from test import test_support
mimetools = test_support.import_module('mimetools', deprecated=True)
multifile = test_support.import_module('multifile', deprecated=True)
import cStringIO
msg = """Mime-Version: 1.0
Content-Type: multipart/mixed;
boundary="=====================_590453667==_"
X-OriginalArrivalTime: 05 Feb 2002 03:43:23.0310 (UTC) FILETIME=[42D88CE0:01C1ADF7]
--=====================_590453667==_
Content-Type: multipart/alternative;
boundary="=====================_590453677==_.ALT"
--=====================_590453677==_.ALT
Content-Type: text/plain; charset="us-ascii"; format=flowed
test A
--=====================_590453677==_.ALT
Content-Type: text/html; charset="us-ascii"
<html>
<b>test B</font></b></html>
--=====================_590453677==_.ALT--
--=====================_590453667==_
Content-Type: text/plain; charset="us-ascii"
Content-Disposition: attachment; filename="att.txt"
Attached Content.
Attached Content.
Attached Content.
Attached Content.
--=====================_590453667==_--
"""
def getMIMEMsg(mf):
    """Recursively walk a MIME message via multifile.MultiFile.

    Increments the module-global ``boundaries`` for every multipart
    section encountered and adds each leaf part's line count to the
    module-global ``linecount``.
    """
    global boundaries, linecount
    msg = mimetools.Message(mf)
    #print "TYPE: %s" % msg.gettype()
    if msg.getmaintype() == 'multipart':
        # Descend into this multipart section: scope the reader to its
        # boundary, recurse into each sub-part, then pop the boundary.
        boundary = msg.getparam("boundary")
        boundaries += 1
        mf.push(boundary)
        while mf.next():
            getMIMEMsg(mf)
        mf.pop()
    else:
        # Leaf part: just count its body lines.
        lines = mf.readlines()
        linecount += len(lines)
def test_main():
    """Parse the sample message and verify the boundary/line tallies."""
    global boundaries, linecount
    boundaries = 0
    linecount = 0
    f = cStringIO.StringIO(msg)
    getMIMEMsg(multifile.MultiFile(f))
    # The sample message above has two nested multipart sections and
    # nine body lines spread over its leaf parts.
    assert boundaries == 2
    assert linecount == 9
if __name__ == '__main__':
test_main()
| mit |
andrewklau/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.12-1/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 91 | 5294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
import yaml
DOCUMENTATION = '''
---
module: openshift_upgrade_config
short_description: OpenShift Upgrade Config
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
                      msg_append=''):
    """Reconcile a list of API levels in place.

    Drops every level found in ``remove`` and appends any level from
    ``ensure`` that is missing.  Non-list inputs are treated as empty;
    a non-list ``level_list`` is replaced by a fresh list (and recorded
    as a change).

    Returns a dict with keys ``new_list`` (the resulting list),
    ``changed`` (bool), and ``changes`` (human-readable descriptions,
    each framed by ``msg_prepend``/``msg_append``).
    """
    changed = False
    changes = []

    # Normalize the edit lists; anything that isn't a list means "nothing".
    remove = remove if isinstance(remove, list) else []
    ensure = ensure if isinstance(ensure, list) else []

    if isinstance(level_list, list):
        new_list = level_list
    else:
        new_list = []
        changed = True
        changes.append("%s created missing %s" % (msg_prepend, msg_append))

    for unwanted in remove:
        if unwanted in new_list:
            new_list.remove(unwanted)
            changed = True
            changes.append("%s removed %s %s" % (msg_prepend, unwanted, msg_append))

    for wanted in ensure:
        if wanted not in new_list:
            new_list.append(wanted)
            changed = True
            changes.append("%s added %s %s" % (msg_prepend, wanted, msg_append))

    return {'new_list': new_list, 'changed': changed, 'changes': changes}
def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
    """Main upgrade method for 3.0 to 3.1.

    Rewrites master-config.yaml under ``config_base`` in place and
    returns a list of human-readable change descriptions (empty when
    nothing needed updating).  When ``backup`` is true, a backup of the
    original file is taken before writing.

    Fixes vs. the original:
    - ``config['kubernetesMasterConfig']`` was indexed without a presence
      check when adding proxyClientInfo (KeyError on configs missing the
      section, even though the apiLevels step above guarded for it).
    - ``config['oauthConfig']`` could likewise raise KeyError.
    - Files are now opened with ``with`` so handles close on error paths.
    """
    changes = []
    # Facts do not get transferred to the hosts where custom modules run,
    # need to make some assumptions here.
    master_config = os.path.join(config_base, 'master/master-config.yaml')
    with open(master_config, 'r') as master_cfg_file:
        config = yaml.safe_load(master_cfg_file.read())

    # Remove unsupported api versions and ensure supported api versions from
    # master config
    unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
    supported_levels = ['v1']

    result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
                               supported_levels, 'master-config.yaml:', 'from apiLevels')
    if result['changed']:
        config['apiLevels'] = result['new_list']
        changes.append(result['changes'])

    if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
        config['kubernetesMasterConfig'].pop('apiLevels')
        changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')

    # Add masterCA to serviceAccountConfig
    if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
        config['serviceAccountConfig']['masterCA'] = \
            config.get('oauthConfig', {}).get('masterCA', 'ca.crt')

    # Add proxyClientInfo to master-config; create the section if absent
    # instead of raising KeyError.
    kube_master = config.setdefault('kubernetesMasterConfig', {})
    if 'proxyClientInfo' not in kube_master:
        kube_master['proxyClientInfo'] = {
            'certFile': 'master.proxy-client.crt',
            'keyFile': 'master.proxy-client.key'
        }
        changes.append("master-config.yaml: added proxyClientInfo")

    if len(changes) > 0:
        if backup:
            # TODO: Check success:
            ansible_module.backup_local(master_config)

        # Write the modified config:
        with open(master_config, 'w') as out_file:
            out_file.write(yaml.safe_dump(config, default_flow_style=False))

    return changes
def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
    """Upgrade entry point.

    Dispatches to the handler for the requested version transition and
    returns its change list; returns None when no handler matches.
    """
    if (from_version, to_version) == ('3.0', '3.1'):
        return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
    return None
def main():
    """Ansible module entry point: validate arguments and run the upgrade."""
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name,
    # redefined-outer-name
    global module
    # AnsibleModule comes from the wildcard import of
    # ansible.module_utils.basic at the bottom of this file.
    module = AnsibleModule(
        argument_spec=dict(
            config_base=dict(required=True),
            from_version=dict(required=True, choices=['3.0']),
            to_version=dict(required=True, choices=['3.1']),
            role=dict(required=True, choices=['master']),
            backup=dict(required=False, default=True, type='bool')
        ),
        supports_check_mode=True,
    )
    from_version = module.params['from_version']
    to_version = module.params['to_version']
    role = module.params['role']
    backup = module.params['backup']
    config_base = module.params['config_base']
    try:
        changes = []
        if role == 'master':
            changes = upgrade_master(module, config_base, from_version,
                                     to_version, backup)
        changed = len(changes) > 0
        return module.exit_json(changed=changed, changes=changes)
    # ignore broad-except error to avoid stack trace to ansible user
    # pylint: disable=broad-except
    except Exception, e:
        return module.fail_json(msg=str(e))
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| apache-2.0 |
sankha93/selenium | py/selenium/webdriver/common/html5/application_cache.py | 52 | 1430 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The ApplicationCache implementation.
"""
from selenium.webdriver.remote.command import Command
class ApplicationCache(object):
    """Read-only proxy for the browser's HTML5 application-cache state."""

    # Application-cache status codes reported by the remote end.
    UNCACHED = 0
    IDLE = 1
    CHECKING = 2
    DOWNLOADING = 3
    UPDATE_READY = 4
    OBSOLETE = 5

    def __init__(self, driver):
        """
        Creates a new Aplication Cache.

        :Args:
         - driver: The WebDriver instance which performs user actions.
        """
        self.driver = driver

    @property
    def status(self):
        """
        Returns a current status of application cache.
        """
        response = self.driver.execute(Command.GET_APP_CACHE_STATUS)
        return response['value']
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/conf/locale/__init__.py | 82 | 12130 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
"""
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name was appearing
inside a sentence in your language.
The 'fallback' key can be used to specify a special fallback logic which doesn't
follow the traditional 'fr-ca' -> 'fr' fallback logic.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'slovenský',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
}
| mit |
the-elven-archer/bind_manager | bind_manager_v3/modules/dns_tools.py | 1 | 3907 | #!/usr/bin/env python2
import dns.query
import dns.zone
import dns.rdtypes
import dns.rdatatype
import dns.rdataclass
import dns.rdata
import dns.update
import dns.tsigkeyring
import IPy
import urllib
from operator import itemgetter
import time
from .models import *
from .main import *
class dns_utils():
    """Helpers for zone transfers (AXFR) and queued dynamic DNS updates.

    NOTE(review): Python 2 only (``print`` statements, ``urllib.quote_plus``).
    Relies on peewee models (``zones``, ``change_cache``, ``change_logs``)
    and helpers (``get_view_key``, ``get_view_server``) imported from the
    sibling modules.
    """

    def __init__(self):
        # Stateless helper object; nothing to initialise.
        pass

    def get_transfer(self, master, zone):
        """ Get the zone AXFR """
        # One dict per resource record found in the transferred zone.
        lista = []
        try:
            zone_xfr = dns.zone.from_xfr(dns.query.xfr(master, zone, relativize=False), relativize=False)
            for name, node in zone_xfr.nodes.items():
                rdatasets = node.rdatasets
                for rdataset in rdatasets:
                    for rdata in rdataset:
                        lista.append({'name': name.to_text(),
                                      # NOTE(review): this stores the *builtin*
                                      # ``id`` function, not a record id --
                                      # looks like a bug; confirm intent.
                                      'id': id,
                                      'type': dns.rdatatype.to_text(rdataset.rdtype),
                                      'ttl': rdataset.ttl,
                                      'data': rdata,
                                      # URL-encoded copy for safe embedding in links.
                                      'urlsafe_data': urllib.quote_plus(str(rdata))})
            # Alphabetical by owner name, for stable display ordering.
            lista.sort(key=itemgetter('name'), reverse=False)
            return lista
        except dns.exception.FormError:
            # Transfer refused or malformed; callers treat None as "no data".
            return None

    def process_dns_queue(self, user=None, zone=None, view=None):
        """ Process the list for the user and adds/removes DNS records """
        # Getting key from view
        tsigkey = get_view_key(view)
        master = get_view_server(view)
        zona = zones.select().where(zones.zone == zone).get()
        # Pending modifications queued by this user for this zone.
        update_list = change_cache.select().where(change_cache.zone_id == zona.id,
                                                  change_cache.username == user)
        dns_keyring = dns.tsigkeyring.from_text(tsigkey)
        dns_update = dns.update.Update(zone, keyring=dns_keyring)
        for modification in update_list:
            if modification.rr:
                update_fqdn = "%s.%s." % (modification.rr, zone)
            else:
                # An empty RR name means the change targets the zone apex.
                update_fqdn = "%s." % (zone)
            print modification.rr
            print zone
            if modification.action == "add":
                dns_update.add(update_fqdn,
                               modification.ttl,
                               str(modification.type),
                               str(modification.data))
                change_logs.create(action="add",
                                   username=user,
                                   date=time.strftime("%c"),
                                   message="Zone %s - Add %s %s -> %s" % (zone,
                                                                          str(update_fqdn),
                                                                          str(modification.type),
                                                                          str(modification.data)))
            elif modification.action == "del":
                # NOTE(review): delete uses the bare RR name while add uses the
                # FQDN -- confirm this asymmetry is intentional.
                dns_update.delete(modification.rr,
                                  str(modification.type),
                                  str(modification.data))
                change_logs.create(action="delete",
                                   username=user,
                                   date=time.strftime("%c"),
                                   message="Zone %s - Del %s %s -> %s" % (zone,
                                                                          str(modification.rr),
                                                                          str(modification.type),
                                                                          str(modification.data)))
            # Clean queue
            delete_query = change_cache.delete().where(change_cache.id == modification.id)
            delete_query.execute()
        # Single dynamic-update message carrying all queued changes.
        dns_response = dns.query.tcp(dns_update, master)
        return True
| apache-2.0 |
LIS/lis-tempest | tempest/api/identity/test_extension.py | 12 | 1194 | # Copyright 2014 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest import test
class ExtensionTestJSON(base.BaseIdentityV2AdminTest):
    """Verifies the identity v2 extension listing API."""

    @test.idempotent_id('85f3f661-f54c-4d48-b563-72ae952b9383')
    def test_list_extensions(self):
        # Fetch every registered extension as a non-admin user.
        extensions = self.non_admin_client.list_extensions()['extensions']['values']
        self.assertNotEmpty(extensions)
        # Each listed extension must expose the full set of descriptive fields.
        expected_fields = ['name', 'updated', 'alias', 'links',
                           'namespace', 'description']
        for extension in extensions:
            for field in expected_fields:
                self.assertIn(field, extension)
| apache-2.0 |
baylee/django | tests/gis_tests/geoapp/test_feeds.py | 49 | 4228 | from __future__ import unicode_literals
from xml.dom import minidom
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import (
TestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from .models import City
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@override_settings(ROOT_URLCONF='gis_tests.geoapp.urls')
@skipUnlessDBFeature("gis_enabled")
class GeoFeedTest(TestCase):
    """Exercises GeoRSS and W3C Geo syndication feeds for geographic models."""

    fixtures = ['initial']

    def setUp(self):
        # The syndication framework resolves feed URLs against the current Site.
        Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()

    def assertChildNodes(self, elem, expected):
        "Taken from syndication/tests.py."
        self.assertEqual({n.nodeName for n in elem.childNodes}, set(expected))

    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Feed 1 builds its geometry from a `GEOSGeometry`; feed 2 from a
        # plain 2-tuple in `item_geometry`.
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1 = doc1.firstChild
        feed2 = doc2.firstChild

        # The second feed's channel must also carry the bounding box.
        self.assertChildNodes(
            feed2.getElementsByTagName('channel')[0],
            ['title', 'link', 'description', 'language',
             'lastBuildDate', 'item', 'georss:box', 'atom:link'])

        for feed in (feed1, feed2):
            # The georss namespace must be declared on the <rss> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            items = feed.getElementsByTagName('channel')[0].getElementsByTagName('item')
            self.assertEqual(len(items), City.objects.count())
            # Every item must carry its georss element.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])

    def test_geofeed_atom(self):
        "Testing geographic feeds using GeoRSS over Atom."
        doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
        doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
        feed1 = doc1.firstChild
        feed2 = doc2.firstChild

        # The second feed must include the bounding box.
        self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])

        for feed in (feed1, feed2):
            # The georss namespace must be declared on the <feed> element.
            self.assertEqual(feed.getAttribute('xmlns:georss'), 'http://www.georss.org/georss')
            entries = feed.getElementsByTagName('entry')
            self.assertEqual(len(entries), City.objects.count())
            # Every entry must carry its georss element.
            for entry in entries:
                self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])

    def test_geofeed_w3c(self):
        "Testing geographic feeds using W3C Geo."
        doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
        feed = doc.firstChild
        # The geo namespace must be declared on the <feed> element.
        self.assertEqual(feed.getAttribute('xmlns:geo'), 'http://www.w3.org/2003/01/geo/wgs84_pos#')
        items = feed.getElementsByTagName('channel')[0].getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())
        # Every item must expose geo:lat and geo:lon coordinates.
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])

        # Boxes and Polygons aren't allowed in W3C Geo feeds.
        with self.assertRaises(ValueError):  # Box in <channel>
            self.client.get('/feeds/w3cgeo2/')
        with self.assertRaises(ValueError):  # Polygons in <entry>
            self.client.get('/feeds/w3cgeo3/')
| bsd-3-clause |
rruebner/odoo | addons/l10n_syscohada/__openerp__.py | 430 | 1940 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2011 BAAMTU SARL (<http://www.baamtu.sn>).
# contact: leadsn@baamtu.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: a bare dict literal read by the module loader;
# metadata only, no executable code.
{
    'name' : 'OHADA - Accounting',
    'version' : '1.0',
    'author' : 'Baamtu Senegal',
    'category' : 'Localization/Account Charts',
    'description': """
This module implements the accounting chart for OHADA area.
===========================================================
It allows any company or association to manage its financial accounting.
Countries that use OHADA are the following:
-------------------------------------------
Benin, Burkina Faso, Cameroon, Central African Republic, Comoros, Congo,
Ivory Coast, Gabon, Guinea, Guinea Bissau, Equatorial Guinea, Mali, Niger,
Replica of Democratic Congo, Senegal, Chad, Togo.
""",
    'website': 'http://www.baamtu.com',
    'depends' : ['account', 'base_vat'],
    'demo' : [],
    'data' : ['l10n_syscohada_data.xml','l10n_syscohada_wizard.xml'],
    # auto_install=False: only installed when explicitly selected.
    'auto_install': False,
    'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
acozzette/fMBT | pythonshare/pythonshare/client.py | 1 | 9648 | # fMBT, free Model Based Testing tool
# Copyright (c) 2013-2015, Intel Corporation.
#
# Author: antti.kervinen@intel.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# This library provides Python API for connecting to pythonshare
# servers, sending code for remote execution and exporting/importing
# namespaces.
"""pythonshare.client - interface for executing code on pythonshare servers
"""
import socket
import cPickle
import pythonshare
from pythonshare.messages import Exec, Exec_rv, Async_rv, Register_ns, Request_ns, Ns_rv
class Connection(object):
    """Connection to a Pythonshare server.

    Example: connect to a server that is listening to port 8089 on localhost.
    Execute *code* and evaluate *expression* on the default namespace on the
    remote server.

        c = Connection("localhost", 8089)
        c.exec_(code)
        c.eval_(expression)

    Results of executed code are kept on the server after the
    connection is closed.

    Example: Register code that is executed in namespace "goodbye" after
    closing the connection:

        c.exec_in("goodbye", 'pythonshare_ns.exec_on_disconnect("code")')
    """

    def __init__(self, host_or_from_server, port_or_to_server,
                 password=None, namespace="default"):
        """Connect to a pythonshare server

        The server is listening to connections at host:port, or it can be
        communicated via file-like objects from_server and to_server.

        Parameters:
          host_or_from_server (string or file-like object)
                  string: host
                  file: file for receiving messages from the server
          port_or_to_server (int or file-like object)
                  int: port number
                  file: file for sending messages to the server
          password (string, optional)
                  server password. The default is None, that is,
                  do not send password to the server when connecting.
          namespace (string, optional)
                  the default namespace that is used on eval_() and exec_().
                  The default is "default".
        """
        self._ns = namespace
        if isinstance(host_or_from_server, str) and isinstance(port_or_to_server, int):
            host = host_or_from_server
            port = port_or_to_server
            self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._s.connect((host, port))
            # Separate buffered file objects for the two transfer directions.
            self._from_server = self._s.makefile("r")
            self._to_server = self._s.makefile("w")
        elif isinstance(host_or_from_server, file) and isinstance(port_or_to_server, file):
            # Python 2 `file` builtin: the caller supplied the transport,
            # so no socket is owned by this connection.
            self._s = None
            self._to_server = port_or_to_server
            self._from_server = host_or_from_server
        else:
            raise ValueError("invalid host:port (str:int) or to_server:from_server (file:file)")
        if password:
            # authenticate to server
            cPickle.dump(password, self._to_server)
            self._to_server.flush()
            auth_rv = cPickle.load(self._from_server)
            try:
                auth_ok = auth_rv.success
            except AttributeError:
                # Unexpected reply object: treat as failed authentication.
                auth_ok = False
            if not auth_ok:
                raise pythonshare.AuthenticationError("Permission denied")

    def make_local(self, rv):
        # Unwrap an Exec_rv received from the server: re-raise remote
        # exceptions locally, otherwise return the evaluated value.
        if isinstance(rv, Exec_rv):
            if rv.code_exc:
                raise pythonshare.RemoteExecError(rv.code_exc)
            elif rv.expr_exc:
                raise pythonshare.RemoteEvalError(rv.expr_exc)
            else:
                rv = rv.expr_rv
        return rv

    def exec_(self, code, **kwargs):
        """Execute code in the default namespace.

        See exec_in for optional parameters."""
        return self.exec_in(self._ns, code, **kwargs)

    def exec_in(self, namespace, code, expr=None, async=False, lock=True):
        """Execute code in a namespace.

        Parameters:
          namespace (string)
                  namespace in which the code and the expression (if
                  given) will be executed.
          code (string)
                  Python code to be executed in the namespace.
          expr (string, optional)
                  expression to be evaluated in the namespace after
                  executing the code.
          async (boolean, optional)
                  If true, execute code and expr asynchronously. If
                  so, handle to the return value (Async_rv) will be
                  returned. The default is False.
          lock (boolean, optional)
                  lock the namespace from others until this execution
                  has finished. The default is True.

        Returns return value from expr or None.

        Raise RemoteExecError or RemoteEvalError if code or expr caused
        an exception in remote end, respectively.
        """
        try:
            cPickle.dump(Exec(namespace, code, expr, async=async, lock=lock), self._to_server)
            self._to_server.flush()
            # Synchronous round-trip even when async=True: the reply is then
            # an Async_rv handle rather than the final value.
            return self.make_local(cPickle.load(self._from_server))
        except EOFError:
            # Server closed the stream before replying.
            raise pythonshare.PythonShareError(
                'No connection to namespace "%s"' % (namespace,))

    def eval_(self, expr, **kwargs):
        """Evaluate expr in the default namespace.

        See eval_in for optional parameters."""
        return self.eval_in(self._ns, expr, **kwargs)

    def eval_in(self, namespace, expr, async=False, lock=True):
        """Evaluate expr in a namespace.

        Parameters:
          namespace (string)
                  namespace in which the expression will be evaluated.
          expr (string)
                  Python expression.
          async (boolean, optional)
                  If True, expression will be evaluated asynchronously
                  and a handle to the return value (Async_rv) will be
                  returned.
          lock (boolean, optional)
                  lock the namespace from others until this execution
                  has finished. The default is True.

        Returns return value of the expr.

        Raises RemoteEvalError if expr caused an exception.
        """
        # Evaluation is just an exec with empty code and an expression.
        return self.exec_in(namespace, "", expr, async=async, lock=lock)

    def read_rv(self, async_rv, timeout=0):
        """Read return value of async call.

        Parameters:
          async_rv (string or Async_rv object)
                  Handle to asynchronous return value, created by
                  async exec_in or eval_in.
          timeout (float or integer, optional)
                  -1: block until return value is ready and return it.
                  0: returns pythonshare.Busy immediately if return
                  value is not readable yet.
                  > 0: wait until this timeout (NOT IMPLEMENTED).
                  The defualt is 0.

        NOTE(review): `timeout` is documented but currently unused by
        this implementation.
        """
        if isinstance(async_rv, str):
            # Accept the repr() string form of an Async_rv as well.
            async_rv = eval(async_rv)
        rv = self.eval_in(async_rv.ns, "pythonshare_ns.read_rv(%s)" % (async_rv,))
        return rv

    def export_ns(self, namespace):
        """Export namespace to remote peer

        Parameters:
          namespace (string)
                  Namespace to be exported, can be local or
                  remote to current host.

        Returns True on success or raises an exception. If succeeded,
        this connection becomes a server for requests from remote
        peer. (The remote peer accesses registered namespace through
        this connection object.)
        """
        cPickle.dump(Register_ns(namespace), self._to_server)
        self._to_server.flush()
        rv = cPickle.load(self._from_server)
        if isinstance(rv, Ns_rv) and rv.status:
            return True
        else:
            raise pythonshare.PythonShareError(rv.errormsg)

    def import_ns(self, namespace):
        """Request a namespace from the remote peer.

        Returns True on success or raises PythonShareError with the
        server-reported error message.
        """
        cPickle.dump(Request_ns(namespace), self._to_server)
        self._to_server.flush()
        rv = cPickle.load(self._from_server)
        if isinstance(rv, Ns_rv) and rv.status:
            return True
        else:
            raise pythonshare.PythonShareError(rv.errormsg)

    def poll_rvs(self, namespace):
        """Poll available async return values from namespace.

        Parameters:
          namespace (string)
                  namespace from which return values are queried.

        Example:
          rv = c.eval_("time.sleep(1)", async=True)
          print c.poll_rvs(rv.ns)
          time.sleep(1)
          print c.poll_rvs(rv.ns)
          print c.read_rv(rv)
        """
        return self.eval_in(namespace, "pythonshare_ns.poll_rvs()",
                            async=False, lock=False)

    def close(self):
        # Closes both file objects and the socket (if owned).
        pythonshare._close(self._to_server, self._from_server, self._s)

    def getpeername(self):
        # Socket transport: real peer address; file transport: file names
        # if the file objects expose them.
        if self._s:
            return self._s.getpeername()
        else:
            return (getattr(self._to_server, "name", None),
                    getattr(self._from_server, "name", None))
| lgpl-2.1 |
openhatch/oh-mainline | vendor/packages/twisted/twisted/mail/test/pop3testserver.py | 57 | 8173 | #!/usr/bin/env python
# -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet.protocol import Factory
from twisted.protocols import basic
from twisted.internet import reactor
import sys, time
# Credentials accepted by the stub server and the TCP port it listens on.
USER = "test"
PASS = "twisted"
PORT = 1100

# Behaviour switches, toggled from the command line via processArg():
# each one puts the server into a specific misbehaviour mode for client tests.
SSL_SUPPORT = True
UIDL_SUPPORT = True
INVALID_SERVER_RESPONSE = False
INVALID_CAPABILITY_RESPONSE = False
INVALID_LOGIN_RESPONSE = False
DENY_CONNECTION = False
DROP_CONNECTION = False
BAD_TLS_RESPONSE = False
TIMEOUT_RESPONSE = False
TIMEOUT_DEFERRED = False
SLOW_GREETING = False

"""Commands"""
# Canned POP3 protocol responses sent verbatim to the client.
CONNECTION_MADE = "+OK POP3 localhost v2003.83 server ready"

CAPABILITIES = [
    "TOP",
    "LOGIN-DELAY 180",
    "USER",
    "SASL LOGIN"
]

CAPABILITIES_SSL = "STLS"
CAPABILITIES_UIDL = "UIDL"

INVALID_RESPONSE = "-ERR Unknown request"
VALID_RESPONSE = "+OK Command Completed"
AUTH_DECLINED = "-ERR LOGIN failed"
AUTH_ACCEPTED = "+OK Mailbox open, 0 messages"
TLS_ERROR = "-ERR server side error start TLS handshake"
LOGOUT_COMPLETE = "+OK quit completed"
# (sic: "AUHORIZATION" typo preserved -- it is part of the wire protocol text.)
NOT_LOGGED_IN = "-ERR Unknown AUHORIZATION state command"
STAT = "+OK 0 0"
UIDL = "+OK Unique-ID listing follows\r\n."
LIST = "+OK Mailbox scan listing follows\r\n."
CAP_START = "+OK Capability list follows:"
class POP3TestServer(basic.LineReceiver):
    """POP3 protocol stub whose (mis)behaviour is driven by module flags."""

    def __init__(self, contextFactory = None):
        self.loggedIn = False       # True after a successful USER/PASS pair
        self.caps = None            # lazily built capability response lines
        self.tmpUser = None         # username remembered between USER and PASS
        self.ctx = contextFactory   # optional TLS context factory for STLS

    def sendSTATResp(self, req):
        self.sendLine(STAT)

    def sendUIDLResp(self, req):
        self.sendLine(UIDL)

    def sendLISTResp(self, req):
        self.sendLine(LIST)

    def sendCapabilities(self):
        # Build the CAPA reply once and cache it; the content depends on the
        # SSL/UIDL support flags at first call.
        if self.caps is None:
            self.caps = [CAP_START]
            if UIDL_SUPPORT:
                self.caps.append(CAPABILITIES_UIDL)
            if SSL_SUPPORT:
                self.caps.append(CAPABILITIES_SSL)
            for cap in CAPABILITIES:
                self.caps.append(cap)
        # Multi-line reply terminated by a lone dot, per POP3.
        resp = '\r\n'.join(self.caps)
        resp += "\r\n."
        self.sendLine(resp)

    def connectionMade(self):
        if DENY_CONNECTION:
            self.disconnect()
            return
        if SLOW_GREETING:
            # Delay the greeting to exercise client connect timeouts.
            reactor.callLater(20, self.sendGreeting)
        else:
            self.sendGreeting()

    def sendGreeting(self):
        self.sendLine(CONNECTION_MADE)

    def lineReceived(self, line):
        """Error Conditions"""
        uline = line.upper()
        # find(s): case-insensitive "command s appears somewhere in the line".
        find = lambda s: uline.find(s) != -1

        if TIMEOUT_RESPONSE:
            # Do not respond to clients request
            return

        if DROP_CONNECTION:
            self.disconnect()
            return

        elif find("CAPA"):
            if INVALID_CAPABILITY_RESPONSE:
                self.sendLine(INVALID_RESPONSE)
            else:
                self.sendCapabilities()

        elif find("STLS") and SSL_SUPPORT:
            self.startTLS()

        elif find("USER"):
            if INVALID_LOGIN_RESPONSE:
                self.sendLine(INVALID_RESPONSE)
                return
            resp = None
            try:
                # Remember the username; PASS completes the login.
                self.tmpUser = line.split(" ")[1]
                resp = VALID_RESPONSE
            except:
                resp = AUTH_DECLINED
            self.sendLine(resp)

        elif find("PASS"):
            resp = None
            try:
                pwd = line.split(" ")[1]
                if self.tmpUser is None or pwd is None:
                    resp = AUTH_DECLINED
                elif self.tmpUser == USER and pwd == PASS:
                    resp = AUTH_ACCEPTED
                    self.loggedIn = True
                else:
                    resp = AUTH_DECLINED
            except:
                resp = AUTH_DECLINED
            self.sendLine(resp)

        elif find("QUIT"):
            self.loggedIn = False
            self.sendLine(LOGOUT_COMPLETE)
            self.disconnect()

        elif INVALID_SERVER_RESPONSE:
            # Garbage mode: reply nonsense to anything not handled above.
            self.sendLine(INVALID_RESPONSE)

        elif not self.loggedIn:
            self.sendLine(NOT_LOGGED_IN)

        elif find("NOOP"):
            self.sendLine(VALID_RESPONSE)

        elif find("STAT"):
            if TIMEOUT_DEFERRED:
                # Leave the client's Deferred pending forever.
                return
            self.sendLine(STAT)

        elif find("LIST"):
            if TIMEOUT_DEFERRED:
                return
            self.sendLine(LIST)

        elif find("UIDL"):
            if TIMEOUT_DEFERRED:
                return
            elif not UIDL_SUPPORT:
                self.sendLine(INVALID_RESPONSE)
                return
            self.sendLine(UIDL)

    def startTLS(self):
        if self.ctx is None:
            self.getContext()
        if SSL_SUPPORT and self.ctx is not None:
            self.sendLine('+OK Begin TLS negotiation now')
            self.transport.startTLS(self.ctx)
        else:
            self.sendLine('-ERR TLS not available')

    def disconnect(self):
        self.transport.loseConnection()

    def getContext(self):
        try:
            from twisted.internet import ssl
        except ImportError:
            # No SSL support installed: STLS will be refused.
            self.ctx = None
        else:
            self.ctx = ssl.ClientContextFactory()
            self.ctx.method = ssl.SSL.TLSv1_METHOD
# Command-line help text; printed by processArg() for '--help' or bad args.
usage = """popServer.py [arg] (default is Standard POP Server with no messages)
no_ssl - Start with no SSL support
no_uidl - Start with no UIDL support
bad_resp - Send a non-RFC compliant response to the Client
bad_cap_resp - send a non-RFC compliant response when the Client sends a 'CAPABILITY' request
bad_login_resp - send a non-RFC compliant response when the Client sends a 'LOGIN' request
deny - Deny the connection
drop - Drop the connection after sending the greeting
bad_tls - Send a bad response to a STARTTLS
timeout - Do not return a response to a Client request
to_deferred - Do not return a response on a 'Select' request. This
will test Deferred callback handling
slow - Wait 20 seconds after the connection is made to return a Server Greeting
"""
def printMessage(msg):
    # Console notice about the mode the test server was started in
    # (Python 2 print statement).
    print "Server Starting in %s mode" % msg
def processArg(arg):
    # Map one command-line switch onto the matching misbehaviour flag;
    # '--help' and unknown switches print usage and exit.
    if arg.lower() == 'no_ssl':
        global SSL_SUPPORT
        SSL_SUPPORT = False
        printMessage("NON-SSL")

    elif arg.lower() == 'no_uidl':
        global UIDL_SUPPORT
        UIDL_SUPPORT = False
        printMessage("NON-UIDL")

    elif arg.lower() == 'bad_resp':
        global INVALID_SERVER_RESPONSE
        INVALID_SERVER_RESPONSE = True
        printMessage("Invalid Server Response")

    elif arg.lower() == 'bad_cap_resp':
        global INVALID_CAPABILITY_RESPONSE
        INVALID_CAPABILITY_RESPONSE = True
        printMessage("Invalid Capability Response")

    elif arg.lower() == 'bad_login_resp':
        global INVALID_LOGIN_RESPONSE
        INVALID_LOGIN_RESPONSE = True
        # NOTE(review): message says "Capability" but the flag affects LOGIN --
        # looks like a copy/paste slip in the original.
        printMessage("Invalid Capability Response")

    elif arg.lower() == 'deny':
        global DENY_CONNECTION
        DENY_CONNECTION = True
        printMessage("Deny Connection")

    elif arg.lower() == 'drop':
        global DROP_CONNECTION
        DROP_CONNECTION = True
        printMessage("Drop Connection")

    elif arg.lower() == 'bad_tls':
        global BAD_TLS_RESPONSE
        BAD_TLS_RESPONSE = True
        printMessage("Bad TLS Response")

    elif arg.lower() == 'timeout':
        global TIMEOUT_RESPONSE
        TIMEOUT_RESPONSE = True
        printMessage("Timeout Response")

    elif arg.lower() == 'to_deferred':
        global TIMEOUT_DEFERRED
        TIMEOUT_DEFERRED = True
        printMessage("Timeout Deferred Response")

    elif arg.lower() == 'slow':
        global SLOW_GREETING
        SLOW_GREETING = True
        printMessage("Slow Greeting")

    elif arg.lower() == '--help':
        print usage
        sys.exit()

    else:
        print usage
        sys.exit()
def main():
    # Apply optional mode switches from argv, then serve the POP3 stub
    # on PORT until the reactor is stopped.
    if len(sys.argv) < 2:
        printMessage("POP3 with no messages")
    else:
        args = sys.argv[1:]
        for arg in args:
            processArg(arg)

    f = Factory()
    f.protocol = POP3TestServer
    reactor.listenTCP(PORT, f)
    reactor.run()
| agpl-3.0 |
shanglt/youtube-dl | youtube_dl/extractor/turbo.py | 140 | 2386 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
qualities,
xpath_text,
)
class TurboIE(InfoExtractor):
    """Extractor for videos hosted on turbo.fr."""

    _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
    _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
    _TEST = {
        'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
        'md5': '33f4b91099b36b5d5a91f84b5bcba600',
        'info_dict': {
            'id': '454443',
            'ext': 'mp4',
            'duration': 3715,
            'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
            'description': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        webpage = self._download_webpage(url, video_id)
        playlist = self._download_xml(self._API_URL.format(video_id), video_id)

        item = playlist.find('./channel/item')
        if item is None:
            raise ExtractorError('Playlist item was not found', expected=True)

        title = xpath_text(item, './title', 'title')
        duration = int_or_none(xpath_text(item, './durate', 'duration'))
        thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
        description = self._html_search_meta('description', webpage)

        # Each quality variant lives in its own <url_video_*> child tag.
        get_quality = qualities(['3g', 'sd', 'hq'])
        formats = []
        for child in item:
            quality_match = re.search(r'url_video_(?P<quality>.+)', child.tag)
            if quality_match is None:
                continue
            quality = quality_match.group('quality')
            formats.append({
                'format_id': quality,
                'url': child.text,
                'quality': get_quality(quality),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'thumbnail': thumbnail,
            'description': description,
            'formats': formats,
        }
| unlicense |
lj1985730/vehicleCRM | crm/sqlite.py | 1 | 1746 | # coding=utf-8
import sqlite3
# noinspection PyUnresolvedReferences,SqlResolve
class Database(object):
    """Thin wrapper around a SQLite database file.

    Every query/update opens a fresh connection, runs the statement,
    commits, and closes again, so instances hold no open handle between
    calls.
    """

    #: Historical default database file (kept for backward compatibility).
    DEFAULT_PATH = "data\\vehicleCrm.db"

    def __init__(self, db_path=None):
        """Create a wrapper for *db_path* (defaults to the bundled CRM db).

        db_path -- optional path to the SQLite file; parameterised so tests
                   and alternative deployments can point elsewhere.
        """
        self.conn = None
        self.db_path = db_path if db_path is not None else self.DEFAULT_PATH

    def commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def close(self):
        """Close the current connection."""
        self.conn.close()

    def connect(self):
        """Open (and remember) a new connection; return it."""
        self.conn = sqlite3.connect(self.db_path)
        return self.conn

    def execute_query(self, sql, data):
        """Run a SELECT statement and return all rows.

        sql  -- statement with '?' placeholders
        data -- tuple of bound parameters (may be None or empty)
        """
        cur = self.connect().cursor()
        try:
            # An empty/None tuple means the statement takes no parameters.
            if data:
                res = cur.execute(sql, data)
            else:
                res = cur.execute(sql)
            result = res.fetchall()
            self.commit()
        finally:
            # The original closed the cursor only on the success path;
            # try/finally guarantees cleanup even when execute() raises.
            cur.close()
            self.close()
        return result

    def execute_update(self, sql, data):
        """Run an INSERT/UPDATE/DELETE/DDL statement and commit it.

        sql  -- statement with '?' placeholders
        data -- tuple of bound parameters (may be None or empty)
        """
        cur = self.connect().cursor()
        try:
            if data:
                cur.execute(sql, data)
            else:
                cur.execute(sql)
            self.commit()
        finally:
            cur.close()
            self.close()

    def load_all(self, table_name):
        """Return every row of *table_name*.

        NOTE: the table name is interpolated into the SQL text (identifiers
        cannot be bound as parameters) -- pass trusted names only.
        """
        sql = "SELECT * FROM %s;" % table_name
        return self.execute_query(sql, ())

    def load_one(self, table_name, data_id):
        """Return the single row of *table_name* whose ID equals *data_id*.

        Raises IndexError when no such row exists (unchanged from the
        original contract).
        """
        sql = "SELECT * FROM %s WHERE ID = ?;" % table_name
        return self.execute_query(sql, (data_id,))[0]
| apache-2.0 |
luci/luci-py | appengine/swarming/handlers_backend.py | 2 | 15567 | # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Main entry point for Swarming backend handlers."""
import datetime
import json
import logging
import webapp2
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from google.appengine import runtime
from google.protobuf import json_format
from proto.api import plugin_pb2
from components import decorators
from components import datastore_utils
from server import bq_state
from server import bot_groups_config
from server import bot_management
from server import config
from server import external_scheduler
from server import named_caches
from server import stats_bots
from server import stats_tasks
from server import task_queues
from server import task_request
from server import task_result
from server import task_scheduler
import ts_mon_metrics
## Cron jobs.
class _CronHandlerBase(webapp2.RequestHandler):
    """Base class for cron handlers: GET dispatches to run_cron()."""

    @decorators.require_cronjob
    def get(self):
        # Only reachable by the App Engine cron service (decorator enforced).
        self.run_cron()

    def run_cron(self):
        # Subclasses override this with the actual periodic work.
        raise NotImplementedError()
class CronBotDiedHandler(_CronHandlerBase):
    """Sets running tasks where the bot is not sending ping updates for several
    minutes as BOT_DIED.
    """

    def run_cron(self):
        # All logic delegated to the scheduler module.
        task_scheduler.cron_handle_bot_died()
class CronAbortExpiredShardToRunHandler(_CronHandlerBase):
    """Set tasks that haven't started before their expiration_ts timestamp as
    EXPIRED.

    Most of the tasks will be expired 'inline' as bots churn through the queue,
    but tasks where the bots are not polling will be expired by this cron job.
    """

    def run_cron(self):
        # Safety net for queues with no polling bots.
        task_scheduler.cron_abort_expired_task_to_run()
class CronTidyTaskQueues(_CronHandlerBase):
    """Removes unused tasks queues, the 'dimensions sets' without active task
    flows.
    """

    def run_cron(self):
        # Garbage collection for stale dimension sets.
        task_queues.cron_tidy_stale()
class CronUpdateBotInfoComposite(_CronHandlerBase):
    """Updates BotInfo.composite if needed, e.g. the bot became dead because it
    hasn't pinged for a while.
    """

    def run_cron(self):
        # Delegated to the bot bookkeeping module.
        bot_management.cron_update_bot_info()
class CronDeleteOldBots(_CronHandlerBase):
    """Deletes old BotRoot entity groups."""

    def run_cron(self):
        # Datastore cleanup of long-gone bots.
        bot_management.cron_delete_old_bot()
class CronDeleteOldBotEvents(_CronHandlerBase):
    """Deletes old BotEvent entities."""

    def run_cron(self):
        # Datastore cleanup of aged bot event history.
        bot_management.cron_delete_old_bot_events()
class CronDeleteOldTasks(_CronHandlerBase):
  """Deletes old TaskRequest entities and all their descendants."""

  def run_cron(self):
    task_request.cron_delete_old_task_requests()
class CronNamedCachesUpdate(_CronHandlerBase):
  """Updates named caches hints."""

  def run_cron(self):
    named_caches.cron_update_named_caches()
class CronCountTaskBotDistributionHandler(_CronHandlerBase):
  """Counts how many runnable bots per task for monitoring."""

  def run_cron(self):
    task_scheduler.cron_task_bot_distribution()
class CronBotsDimensionAggregationHandler(_CronHandlerBase):
  """Aggregates all bots dimensions (except id) in the fleet."""

  def run_cron(self):
    bot_management.cron_aggregate_dimensions()
class CronTasksTagsAggregationHandler(_CronHandlerBase):
  """Aggregates all task tags from the last hour."""

  def run_cron(self):
    task_result.cron_update_tags()
class CronBotGroupsConfigHandler(_CronHandlerBase):
  """Fetches bots.cfg with all includes, assembles the final config."""

  def run_cron(self):
    try:
      bot_groups_config.refetch_from_config_service()
    except bot_groups_config.BadConfigError:
      # Best effort: a broken bots.cfg must not make the cron job fail, but
      # it should be visible in the logs instead of being silently swallowed.
      logging.exception('Failed to refetch bots.cfg')
class CronExternalSchedulerCancellationsHandler(_CronHandlerBase):
  """Fetches cancelled tasks from external schedulers, and cancels them."""

  def run_cron(self):
    task_scheduler.cron_handle_external_cancellations()
class CronExternalSchedulerGetCallbacksHandler(_CronHandlerBase):
  """Fetches callbacks requests from external schedulers, and notifies them."""

  def run_cron(self):
    task_scheduler.cron_handle_get_callbacks()
class CronBotsStats(_CronHandlerBase):
  """Updates bots monitoring statistics."""

  def run_cron(self):
    stats_bots.cron_generate_stats()
class CronTasksStats(_CronHandlerBase):
  """Updates tasks monitoring statistics."""

  def run_cron(self):
    stats_tasks.cron_generate_stats()
class CronSendToBQ(_CronHandlerBase):
  """Triggers many tasks queues to send data to BigQuery."""

  # (BQ state table name, task queue URL prefix, task queue name) for each
  # export stream.  Order matters only in that it matches the original
  # triggering order.
  _STREAMS = [
      ('task_results_run',
       '/internal/taskqueue/monitoring/bq/tasks/results/run/',
       'monitoring-bq-tasks-results-run'),
      ('task_results_summary',
       '/internal/taskqueue/monitoring/bq/tasks/results/summary/',
       'monitoring-bq-tasks-results-summary'),
      ('bot_events',
       '/internal/taskqueue/monitoring/bq/bots/events/',
       'monitoring-bq-bots-events'),
      ('task_requests',
       '/internal/taskqueue/monitoring/bq/tasks/requests/',
       'monitoring-bq-tasks-requests'),
  ]

  def run_cron(self):
    # It can trigger up to the sum of all the max_taskqueues below.
    # It should complete within close to 50 seconds as each function will try
    # to limit itself to its allocated chunk.
    max_seconds = 50. / 2
    for table, url, queue in self._STREAMS:
      bq_state.cron_trigger_tasks(
          table, url, queue, max_seconds, max_taskqueues=30)
## Task queues.
class TaskCancelTasksHandler(webapp2.RequestHandler):
  """Cancels tasks given a list of their ids."""

  @decorators.silence(datastore_utils.CommitError)
  @decorators.require_taskqueue('cancel-tasks')
  def post(self):
    # Payload is JSON: {'tasks': [<task ids>], 'kill_running': <bool>}.
    payload = json.loads(self.request.body)
    logging.info('Cancelling tasks with ids: %s', payload['tasks'])
    kill_running = payload['kill_running']
    # TODO(maruel): Parallelize.
    for task_id in payload['tasks']:
      ok, was_running = task_scheduler.cancel_task_with_id(
          task_id, kill_running, None)
      logging.info('task %s canceled: %s was running: %s',
                   task_id, ok, was_running)
class TaskCancelTaskOnBotHandler(webapp2.RequestHandler):
  """Cancels a given task if it is running on the given bot.

  If bot is not specified, cancel task unconditionally.
  If bot is specified, and task is not running on bot, then do nothing.
  """

  @decorators.require_taskqueue('cancel-task-on-bot')
  def post(self):
    payload = json.loads(self.request.body)
    task_id = payload.get('task_id')
    if not task_id:
      # Nothing to do without a task id; log so the malformed enqueue is
      # visible, but do not fail the task queue item (it would be retried).
      logging.error('Missing task_id.')
      return
    bot_id = payload.get('bot_id')
    try:
      # kill_running=True: the task is cancelled even if already started.
      ok, was_running = task_scheduler.cancel_task_with_id(
          task_id, True, bot_id)
      logging.info('task %s canceled: %s was running: %s',
                   task_id, ok, was_running)
    except ValueError:
      # Ignore errors that may be due to missing or invalid tasks.
      logging.warning('Ignoring a task cancellation due to exception.',
                      exc_info=True)
class TaskCancelChildrenTasksHandler(webapp2.RequestHandler):
  """Cancels children tasks with pending state of the given task."""

  @decorators.silence(runtime.DeadlineExceededError)
  @decorators.require_taskqueue('cancel-children-tasks')
  def post(self):
    payload = json.loads(self.request.body)
    task = payload['task']
    logging.info('Cancelling children tasks of task %s', task)
    task_scheduler.task_cancel_running_children_tasks(task)
class TaskExpireTasksHandler(webapp2.RequestHandler):
  """Expires a list of tasks, given a list of their ids."""

  @decorators.require_taskqueue('task-expire')
  def post(self):
    payload = json.loads(self.request.body)
    task_scheduler.task_expire_tasks(payload.get('task_to_runs'))
class TaskDeleteTasksHandler(webapp2.RequestHandler):
  """Deletes a list of tasks, given a list of their ids."""

  @decorators.require_taskqueue('delete-tasks')
  def post(self):
    payload = json.loads(self.request.body)
    task_request.task_delete_tasks(payload['task_ids'])
class TaskDimensionsHandler(webapp2.RequestHandler):
  """Refreshes the active task queues."""

  @decorators.silence(datastore_errors.Timeout)
  @decorators.require_taskqueue('rebuild-task-cache')
  def post(self):
    f = task_queues.rebuild_task_cache_async(self.request.body)
    if not f.get_result():
      # The task likely failed due to DB transaction contention,
      # so we can reply that the service has had too many requests (429).
      # Using a 400-level response also prevents failures here from causing
      # unactionable alerts due to a high rate of 500s.
      self.response.set_status(429, 'Need to retry')
class TaskSendPubSubMessage(webapp2.RequestHandler):
  """Sends PubSub notification about task completion."""

  # task_id is in the URL for better visibility in request logs; the actual
  # data comes from the request body.
  @decorators.require_taskqueue('pubsub')
  def post(self, task_id):  # pylint: disable=unused-argument
    task_scheduler.task_handle_pubsub_task(json.loads(self.request.body))
class TaskESNotifyTasksHandler(webapp2.RequestHandler):
  """Sends task notifications to external scheduler."""

  @decorators.require_taskqueue('es-notify-tasks')
  def post(self):
    es_host = self.request.get('es_host')
    request_json = self.request.get('request_json')
    # Deserialize the NotifyTasksRequest proto from its JSON form.
    request = plugin_pb2.NotifyTasksRequest()
    json_format.Parse(request_json, request)
    external_scheduler.notify_request_now(es_host, request)
class TaskESNotifyKickHandler(webapp2.RequestHandler):
  """Kicks off the pull queue worker to batch the es-notifications."""

  @decorators.require_taskqueue('es-notify-kick')
  def post(self):
    external_scheduler.task_batch_handle_notifications()
class TaskNamedCachesPool(webapp2.RequestHandler):
  """Updates named caches cache for a pool."""

  @decorators.silence(datastore_errors.Timeout)
  @decorators.require_taskqueue('named-cache-task')
  def post(self):
    params = json.loads(self.request.body)
    logging.info('Handling pool: %s', params['pool'])
    named_caches.task_update_pool(params['pool'])
class TaskMonitoringBotsEventsBQ(webapp2.RequestHandler):
  """Sends rows to BigQuery swarming.bot_events table."""

  @decorators.require_taskqueue('monitoring-bq-bots-events')
  def post(self, timestamp):
    # Disable the in-process ndb cache; this scan touches many entities.
    ndb.get_context().set_cache_policy(lambda _: False)
    # The URL-embedded timestamp selects a one-minute export window.
    start = datetime.datetime.strptime(timestamp, u'%Y-%m-%dT%H:%M')
    end = start + datetime.timedelta(seconds=60)
    bot_management.task_bq_events(start, end)
class TaskMonitoringTasksRequestsBQ(webapp2.RequestHandler):
  """Sends rows to BigQuery swarming.task_requests table."""

  @decorators.require_taskqueue('monitoring-bq-tasks-requests')
  def post(self, timestamp):
    # Disable the in-process ndb cache; this scan touches many entities.
    ndb.get_context().set_cache_policy(lambda _: False)
    # The URL-embedded timestamp selects a one-minute export window.
    start = datetime.datetime.strptime(timestamp, u'%Y-%m-%dT%H:%M')
    end = start + datetime.timedelta(seconds=60)
    task_request.task_bq(start, end)
class TaskMonitoringTasksResultsRunBQ(webapp2.RequestHandler):
  """Sends rows to BigQuery swarming.task_results_run table."""

  @decorators.require_taskqueue('monitoring-bq-tasks-results-run')
  def post(self, timestamp):
    # Disable the in-process ndb cache, consistent with the other BQ export
    # handlers (bot_events, task_requests): these scans touch many entities
    # that should not be retained in the context cache.
    ndb.get_context().set_cache_policy(lambda _: False)
    # The URL-embedded timestamp selects a one-minute export window.
    start = datetime.datetime.strptime(timestamp, u'%Y-%m-%dT%H:%M')
    end = start + datetime.timedelta(seconds=60)
    task_result.task_bq_run(start, end)
class TaskMonitoringTasksResultsSummaryBQ(webapp2.RequestHandler):
  """Sends rows to BigQuery swarming.task_results_summary table."""

  @decorators.require_taskqueue('monitoring-bq-tasks-results-summary')
  def post(self, timestamp):
    # Disable the in-process ndb cache, consistent with the other BQ export
    # handlers (bot_events, task_requests): these scans touch many entities
    # that should not be retained in the context cache.
    ndb.get_context().set_cache_policy(lambda _: False)
    # The URL-embedded timestamp selects a one-minute export window.
    start = datetime.datetime.strptime(timestamp, u'%Y-%m-%dT%H:%M')
    end = start + datetime.timedelta(seconds=60)
    task_result.task_bq_summary(start, end)
class TaskMonitoringTSMon(webapp2.RequestHandler):
  """Computes global metrics for timeseries monitoring."""

  @decorators.require_taskqueue('tsmon')
  def post(self, kind):
    ts_mon_metrics.set_global_metrics(kind, payload=self.request.body)
###
def get_routes():
  """Returns internal urls that should only be accessible via the backend.

  Each entry is an (url, handler class) pair wrapped into a webapp2.Route.
  """
  routes = [
      # Cron jobs.
      ('/internal/cron/important/scheduler/abort_bot_missing',
       CronBotDiedHandler),
      ('/internal/cron/important/scheduler/abort_expired',
       CronAbortExpiredShardToRunHandler),
      ('/internal/cron/cleanup/task_queues', CronTidyTaskQueues),
      ('/internal/cron/monitoring/bots/update_bot_info',
       CronUpdateBotInfoComposite),
      ('/internal/cron/cleanup/bots/delete_old', CronDeleteOldBots),
      ('/internal/cron/cleanup/bots/delete_old_bot_events',
       CronDeleteOldBotEvents),
      ('/internal/cron/cleanup/tasks/delete_old', CronDeleteOldTasks),

      # Not yet used.
      ('/internal/cron/monitoring/bots/stats', CronBotsStats),
      # Not yet used.
      ('/internal/cron/monitoring/tasks/stats', CronTasksStats),

      ('/internal/cron/monitoring/bq', CronSendToBQ),
      ('/internal/cron/monitoring/count_task_bot_distribution',
       CronCountTaskBotDistributionHandler),
      ('/internal/cron/monitoring/bots/aggregate_dimensions',
       CronBotsDimensionAggregationHandler),
      ('/internal/cron/monitoring/tasks/aggregate_tags',
       CronTasksTagsAggregationHandler),
      ('/internal/cron/important/bot_groups_config', CronBotGroupsConfigHandler),
      ('/internal/cron/important/external_scheduler/cancellations',
       CronExternalSchedulerCancellationsHandler),
      ('/internal/cron/important/external_scheduler/get_callbacks',
       CronExternalSchedulerGetCallbacksHandler),
      ('/internal/cron/important/named_caches/update', CronNamedCachesUpdate),

      # Task queues.
      ('/internal/taskqueue/important/tasks/cancel', TaskCancelTasksHandler),
      ('/internal/taskqueue/important/tasks/cancel-task-on-bot',
       TaskCancelTaskOnBotHandler),
      ('/internal/taskqueue/important/tasks/cancel-children-tasks',
       TaskCancelChildrenTasksHandler),
      ('/internal/taskqueue/important/tasks/expire',
       TaskExpireTasksHandler),
      ('/internal/taskqueue/cleanup/tasks/delete', TaskDeleteTasksHandler),
      ('/internal/taskqueue/important/task_queues/rebuild-cache',
       TaskDimensionsHandler),
      (r'/internal/taskqueue/important/pubsub/notify-task/<task_id:[0-9a-f]+>',
       TaskSendPubSubMessage),
      ('/internal/taskqueue/important/external_scheduler/notify-tasks',
       TaskESNotifyTasksHandler),
      ('/internal/taskqueue/important/external_scheduler/notify-kick',
       TaskESNotifyKickHandler),
      (r'/internal/taskqueue/important/named_cache/update-pool',
       TaskNamedCachesPool),
      # BigQuery export handlers; the trailing URL component is the
      # minute-granularity timestamp of the window to export.
      (r'/internal/taskqueue/monitoring/bq/bots/events/'
       r'<timestamp:\d{4}-\d\d-\d\dT\d\d:\d\d>',
       TaskMonitoringBotsEventsBQ),
      (r'/internal/taskqueue/monitoring/bq/tasks/requests/'
       r'<timestamp:\d{4}-\d\d-\d\dT\d\d:\d\d>',
       TaskMonitoringTasksRequestsBQ),
      (r'/internal/taskqueue/monitoring/bq/tasks/results/run/'
       r'<timestamp:\d{4}-\d\d-\d\dT\d\d:\d\d>',
       TaskMonitoringTasksResultsRunBQ),
      (r'/internal/taskqueue/monitoring/bq/tasks/results/summary/'
       r'<timestamp:\d{4}-\d\d-\d\dT\d\d:\d\d>',
       TaskMonitoringTasksResultsSummaryBQ),
      (r'/internal/taskqueue/monitoring/tsmon/<kind:[0-9A-Za-z_]+>',
       TaskMonitoringTSMon),
  ]
  return [webapp2.Route(*a) for a in routes]
def create_application(debug):
  """Creates the WSGI application serving the backend-only routes."""
  return webapp2.WSGIApplication(get_routes(), debug=debug)
# apache-2.0
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
could be written with ``gen`` as::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. versionchanged:: 3.2
Dict support added.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado import stack_context
class KeyReuseError(Exception):
    """Raised when a `Callback` key is registered while still pending."""
    pass
class UnknownKeyError(Exception):
    """Raised when a `Wait` references a key that was never registered."""
    pass
class LeakedCallbackError(Exception):
    """Raised when a generator finishes without waiting for all its
    registered callbacks."""
    pass
class BadYieldError(Exception):
    """Raised when a generator yields an object the runner does not
    understand (not a `.Future`, ``YieldPoint``, list or dict)."""
    pass
class ReturnValueIgnoredError(Exception):
    """Raised when an ``@gen.engine`` function returns a non-None value."""
    pass
class TimeoutError(Exception):
    """Exception raised by ``with_timeout`` when the wrapped `.Future` does
    not complete in time."""
def engine(func):
    """Callback-oriented decorator for asynchronous generators.

    This is an older interface; for new code that does not need to be
    compatible with versions of Tornado older than 3.0 the
    `coroutine` decorator is recommended instead.

    This decorator is similar to `coroutine`, except it does not
    return a `.Future` and the ``callback`` argument is not treated
    specially.

    In most cases, functions decorated with `engine` should take
    a ``callback`` argument and invoke it with their result when
    they are finished.  One notable exception is the
    `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
    which use ``self.finish()`` in place of a callback argument.
    """
    func = _make_coroutine_wrapper(func, replace_callback=False)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        future = func(*args, **kwargs)

        def final_callback(future):
            # ``engine`` has no channel to hand a value back to the caller,
            # so a non-None result is almost certainly a bug; fail loudly.
            if future.result() is not None:
                raise ReturnValueIgnoredError(
                    "@gen.engine functions cannot return values: %r" %
                    (future.result(),))
        # The engine interface doesn't give us any way to return
        # errors but to raise them into the stack context.
        # Save the stack context here to use when the Future has resolved.
        future.add_done_callback(stack_context.wrap(final_callback))
    return wrapper
def coroutine(func, replace_callback=True):
    """Decorator for asynchronous generators.

    Any generator that yields objects from this module must be wrapped
    in either this decorator or `engine`.

    Coroutines may "return" by raising the special exception
    `Return(value) <Return>`.  In Python 3.3+, it is also possible for
    the function to simply use the ``return value`` statement (prior to
    Python 3.3 generators were not allowed to also return values).
    In all versions of Python a coroutine that simply wishes to exit
    early may use the ``return`` statement without a value.

    Functions with this decorator return a `.Future`.  Additionally,
    they may be called with a ``callback`` keyword argument, which
    will be invoked with the future's result when it resolves.  If the
    coroutine fails, the callback will not be run and an exception
    will be raised into the surrounding `.StackContext`.  The
    ``callback`` argument is not visible inside the decorated
    function; it is handled by the decorator itself.

    From the caller's perspective, ``@gen.coroutine`` is similar to
    the combination of ``@return_future`` and ``@gen.engine``.

    .. warning::

       When exceptions occur inside a coroutine, the exception
       information will be stored in the `.Future` object.  You must
       examine the result of the `.Future` object, or the exception
       may go unnoticed by your code.  This means yielding the function
       if called from another coroutine, using something like
       `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
       to `.IOLoop.add_future`.
    """
    # NOTE(review): the ``replace_callback`` parameter is accepted but not
    # forwarded -- the wrapper is always built with replace_callback=True.
    # Confirm whether the parameter is intentionally ignored.
    return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
    """The inner workings of ``@gen.coroutine`` and ``@gen.engine``.

    The two decorators differ in their treatment of the ``callback``
    argument, so we cannot simply implement ``@engine`` in terms of
    ``@coroutine``.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()

        # ``coroutine`` (replace_callback=True) consumes the ``callback``
        # kwarg and invokes it with the future's result; ``engine`` leaves
        # any ``callback`` argument for the wrapped function itself.
        if replace_callback and 'callback' in kwargs:
            callback = kwargs.pop('callback')
            IOLoop.current().add_future(
                future, lambda future: callback(future.result()))

        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            # The function "returned" before yielding anything.
            result = getattr(e, 'value', None)
        except Exception:
            future.set_exc_info(sys.exc_info())
            return future
        else:
            if isinstance(result, types.GeneratorType):
                # Inline the first iteration of Runner.run.  This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    yielded = next(result)
                    if stack_context._state.contexts is not orig_stack_contexts:
                        yielded = TracebackFuture()
                        yielded.set_exception(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)'))
                except (StopIteration, Return) as e:
                    future.set_result(getattr(e, 'value', None))
                except Exception:
                    future.set_exc_info(sys.exc_info())
                else:
                    # The generator yielded; hand it to a Runner which will
                    # drive it to completion and resolve ``future``.
                    Runner(result, future, yielded)
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame.  This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles).  We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None
        # Non-generator function: its plain return value becomes the
        # future's result.
        future.set_result(result)
        return future
    return wrapper
class Return(Exception):
    """Special exception used to return a value from a `coroutine`.

    Raising ``Return(value)`` inside a coroutine makes ``value`` the
    result of the coroutine's `.Future`::

        @gen.coroutine
        def fetch_json(url):
            response = yield AsyncHTTPClient().fetch(url)
            raise gen.Return(json_decode(response.body))

    On Python 3.3+ a plain ``return value`` statement inside a generator
    accomplishes the same thing, so this class is only required on older
    versions (where ``yield`` and ``return`` with a value could not be
    combined in one function).

    As with the ``return`` statement, the value is optional:
    ``raise gen.Return()`` behaves like a bare ``return``.
    """

    def __init__(self, value=None):
        super(Return, self).__init__()
        # The coroutine machinery reads this attribute via
        # ``getattr(e, 'value', None)``.
        self.value = value
class YieldPoint(object):
    """Base class for objects that may be yielded from the generator.

    .. deprecated:: 4.0
       Use `Futures <.Future>` instead.
    """
    def start(self, runner):
        """Called by the runner after the generator has yielded.

        No other methods will be called on this object before ``start``.
        """
        raise NotImplementedError()

    def is_ready(self):
        """Called by the runner to determine whether to resume the generator.

        Returns a boolean; may be called more than once.
        """
        raise NotImplementedError()

    def get_result(self):
        """Returns the value to use as the result of the yield expression.

        This method will only be called once, and only after `is_ready`
        has returned true.
        """
        raise NotImplementedError()
class Callback(YieldPoint):
    """Returns a callable object that will allow a matching `Wait` to proceed.

    The key may be any value suitable for use as a dictionary key, and is
    used to match ``Callbacks`` to their corresponding ``Waits``.  The key
    must be unique among outstanding callbacks within a single run of the
    generator function, but may be reused across different runs of the same
    function (so constants generally work fine).

    The callback may be called with zero or one arguments; if an argument
    is given it will be returned by `Wait`.

    .. deprecated:: 4.0
       Use `Futures <.Future>` instead.
    """
    def __init__(self, key):
        self.key = key

    def start(self, runner):
        self.runner = runner
        # Reserve the key; the matching Wait will consume the result.
        runner.register_callback(self.key)

    def is_ready(self):
        # The Callback itself never blocks; its yield expression resolves
        # immediately to the callable.
        return True

    def get_result(self):
        return self.runner.result_callback(self.key)
class Wait(YieldPoint):
    """Returns the argument passed to the result of a previous `Callback`.

    .. deprecated:: 4.0
       Use `Futures <.Future>` instead.
    """
    def __init__(self, key):
        self.key = key

    def start(self, runner):
        self.runner = runner

    def is_ready(self):
        return self.runner.is_ready(self.key)

    def get_result(self):
        # Consumes the result: the key becomes available for reuse.
        return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
    """Returns the results of multiple previous `Callbacks <Callback>`.

    The argument is a sequence of `Callback` keys, and the result is
    a list of results in the same order.

    `WaitAll` is equivalent to yielding a list of `Wait` objects.

    .. deprecated:: 4.0
       Use `Futures <.Future>` instead.
    """
    def __init__(self, keys):
        self.keys = keys

    def start(self, runner):
        self.runner = runner

    def is_ready(self):
        # Ready only when every key has a result.
        return all(self.runner.is_ready(key) for key in self.keys)

    def get_result(self):
        return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
    """Adapts a callback-based asynchronous function for use in coroutines.

    Takes a function (and optional additional arguments) and runs it with
    those arguments plus a ``callback`` keyword argument.  The argument passed
    to the callback is returned as the result of the yield expression.

    .. versionchanged:: 4.0
       ``gen.Task`` is now a function that returns a `.Future`, instead of
       a subclass of `YieldPoint`.  It still behaves the same way when
       yielded.
    """
    future = Future()

    def handle_exception(typ, value, tb):
        # First resolution wins: ignore errors after the future is done.
        if future.done():
            return False
        future.set_exc_info((typ, value, tb))
        return True

    def set_result(result):
        if future.done():
            return
        future.set_result(result)
    # The ExceptionStackContext routes exceptions raised by the callback-style
    # function into the future instead of letting them escape.
    with stack_context.ExceptionStackContext(handle_exception):
        func(*args, callback=_argument_adapter(set_result), **kwargs)
    return future
class YieldFuture(YieldPoint):
    """Adapts a `.Future` to the old-style `YieldPoint` interface."""

    def __init__(self, future, io_loop=None):
        self.future = future
        self.io_loop = io_loop or IOLoop.current()

    def start(self, runner):
        if not self.future.done():
            # Register a key and have the IOLoop deliver the future to it
            # when it resolves.
            self.runner = runner
            self.key = object()
            runner.register_callback(self.key)
            self.io_loop.add_future(self.future, runner.result_callback(self.key))
        else:
            # Already resolved: capture the result now, no runner needed.
            self.runner = None
            self.result = self.future.result()

    def is_ready(self):
        if self.runner is not None:
            return self.runner.is_ready(self.key)
        else:
            return True

    def get_result(self):
        if self.runner is not None:
            # pop_result returns the Future delivered by add_future.
            return self.runner.pop_result(self.key).result()
        else:
            return self.result
class Multi(YieldPoint):
    """Runs multiple asynchronous operations in parallel.

    Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
    their responses.  It is not necessary to call `Multi` explicitly,
    since the engine will do so automatically when the generator yields
    a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.

    Instead of a list, the argument may also be a dictionary whose values are
    Futures, in which case a parallel dictionary is returned mapping the same
    keys to their results.
    """
    def __init__(self, children):
        if isinstance(children, dict):
            self.keys = list(children.keys())
            children = children.values()
        else:
            self.keys = None
        # Wrap plain Futures so that every child speaks the YieldPoint
        # interface.
        self.children = [
            YieldFuture(child) if is_future(child) else child
            for child in children]
        assert all(isinstance(i, YieldPoint) for i in self.children)
        self.unfinished_children = set(self.children)

    def start(self, runner):
        for child in self.children:
            child.start(runner)

    def is_ready(self):
        # Drain the ready children encountered before the first unready one,
        # then report whether everything has finished.
        done = set()
        for child in self.unfinished_children:
            if not child.is_ready():
                break
            done.add(child)
        self.unfinished_children -= done
        return not self.unfinished_children

    def get_result(self):
        results = [child.get_result() for child in self.children]
        if self.keys is not None:
            return dict(zip(self.keys, results))
        return results
def multi_future(children):
    """Wait for multiple asynchronous futures in parallel.

    Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
    a new Future that resolves when all the other Futures are done.
    If all the ``Futures`` succeeded, the returned Future's result is a list
    of their results.  If any failed, the returned Future raises the exception
    of the first one to fail.

    Instead of a list, the argument may also be a dictionary whose values are
    Futures, in which case a parallel dictionary is returned mapping the same
    keys to their results.

    It is not necessary to call `multi_future` explicitly, since the engine
    will do so automatically when the generator yields a list of `Futures`.
    This function is faster than the `Multi` `YieldPoint` because it does not
    require the creation of a stack context.

    .. versionadded:: 4.0
    """
    if isinstance(children, dict):
        keys = list(children.keys())
        children = children.values()
    else:
        keys = None
    assert all(is_future(i) for i in children)
    # NOTE(review): duplicate futures in ``children`` would collapse in this
    # set but trigger two ``callback`` invocations -- confirm callers never
    # pass duplicates.
    unfinished_children = set(children)
    future = Future()
    if not children:
        # Nothing to wait for: resolve immediately with an empty result.
        future.set_result({} if keys is not None else [])

    def callback(f):
        unfinished_children.remove(f)
        if not unfinished_children:
            # All children resolved; gather results (or the first error).
            try:
                result_list = [i.result() for i in children]
            except Exception:
                future.set_exc_info(sys.exc_info())
            else:
                if keys is not None:
                    future.set_result(dict(zip(keys, result_list)))
                else:
                    future.set_result(result_list)
    for f in children:
        f.add_done_callback(callback)
    return future
def maybe_future(x):
    """Converts ``x`` into a `.Future`.

    A value that already is a `.Future` is passed through untouched; any
    other value is wrapped in a new, already-resolved `.Future`.  This is
    suitable for use as ``result = yield gen.maybe_future(f())`` when you
    don't know whether ``f()`` returns a `.Future` or not.
    """
    if is_future(x):
        return x
    wrapped = Future()
    wrapped.set_result(x)
    return wrapped
def with_timeout(timeout, future, io_loop=None):
    """Wraps a `.Future` in a timeout.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`)

    Currently only supports Futures, not other `YieldPoint` classes.

    .. versionadded:: 4.0
    """
    # TODO: allow yield points in addition to futures?
    # Tricky to do with stack_context semantics.
    #
    # It's tempting to optimize this by cancelling the input future on timeout
    # instead of creating a new one, but A) we can't know if we are the only
    # one waiting on the input future, so cancelling it might disrupt other
    # callers and B) concurrent futures can only be cancelled while they are
    # in the queue, so cancellation cannot reliably bound our waiting time.
    result = Future()
    # Propagate the input future's result/exception to ``result``; whichever
    # of the chain and the timeout fires first wins.
    chain_future(future, result)
    if io_loop is None:
        io_loop = IOLoop.current()
    timeout_handle = io_loop.add_timeout(
        timeout,
        lambda: result.set_exception(TimeoutError("Timeout")))
    if isinstance(future, Future):
        # We know this future will resolve on the IOLoop, so we don't
        # need the extra thread-safety of IOLoop.add_future (and we also
        # don't care about StackContext here.)
        future.add_done_callback(
            lambda future: io_loop.remove_timeout(timeout_handle))
    else:
        # concurrent.futures.Futures may resolve on any thread, so we
        # need to route them back to the IOLoop.
        io_loop.add_future(
            future, lambda future: io_loop.remove_timeout(timeout_handle))
    return result
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
try:
value = future.result()
except Exception:
self.had_exception = True
yielded = self.gen.throw(*sys.exc_info())
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
"""Installs ``yielded`` as the object the generator waits on next.

Returns False if the runner must suspend until a callback fires,
True if the result is already available and run() may continue.
"""
# Lists/dicts of futures or YieldPoints are awaited collectively.
if isinstance(yielded, list):
if all(is_future(f) for f in yielded):
yielded = multi_future(yielded)
else:
yielded = Multi(yielded)
elif isinstance(yielded, dict):
if all(is_future(f) for f in yielded.values()):
yielded = multi_future(yielded)
else:
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
elif is_future(yielded):
self.future = yielded
if not self.future.done() or self.future is moment:
# Resume via the IOLoop once the future resolves.
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
else:
# Anything else yielded is a programming error in the coroutine.
self.future = TracebackFuture()
self.future.set_exception(BadYieldError(
"yielded unknown object %r" % (yielded,)))
return True
def result_callback(self, key):
# Wrap in the current stack context so errors propagate correctly, and
# adapt any callback signature into a single set_result() value.
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
# Invoked by the surrounding ExceptionStackContext; returns True if the
# exception was delivered into the generator, False to let it propagate.
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
# Tear down the lazily-created ExceptionStackContext, if any.
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
| apache-2.0 |
8l/NetASM-python | netasm/examples/back_ends/soft_switch/mininet/linear_switch.py | 3 | 2367 | #!/usr/bin/python
# ################################################################################
# ##
# ## https://github.com/NetASM/NetASM-python
# ##
# ## File:
# ## linear_switch.py
# ##
# ## Project:
# ## NetASM: A Network Assembly Language for Programmable Dataplanes
# ##
# ## Author:
# ## Muhammad Shahbaz
# ##
# ## Copyright notice:
# ## Copyright (C) 2014 Princeton University
# ## Network Operations and Internet Security Lab
# ##
# ## Licence:
# ## This file is a part of the NetASM development base package.
# ##
# ## This file is free code: you can redistribute it and/or modify it under
# ## the terms of the GNU Lesser General Public License version 2.1 as
# ## published by the Free Software Foundation.
# ##
# ## This package is distributed in the hope that it will be useful, but
# ## WITHOUT ANY WARRANTY; without even the implied warranty of
# ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# ## Lesser General Public License for more details.
# ##
# ## You should have received a copy of the GNU Lesser General Public
# ## License along with the NetASM source package. If not, see
# ## http://www.gnu.org/licenses/.
__author__ = 'shahbaz'
from optparse import OptionParser
from mininet.node import RemoteController
from mininet.net import Mininet, CLI
from mininet.topo import SingleSwitchTopo, LinearTopo
from mininet.log import setLogLevel
from netasm.back_ends.soft_switch.mininet.node import NetASMSwitch
def test():
"""Builds a linear NetASM-switch topology under Mininet and exercises it."""
op = OptionParser()
op.add_option('--cli', action="store_true", dest="cli")
op.add_option('--ports', action="store", dest="ports")
op.set_defaults(cli=False, ports=2)
options, args = op.parse_args()
# --ports sets the length of the linear topology (one host per switch).
topo = LinearTopo(int(options.ports))
NetASMSwitch.CTL_ADDRESS = "127.0.0.1"
NetASMSwitch.CTL_PORT = 7791
net = Mininet(topo, switch=NetASMSwitch, autoSetMacs=True, controller=lambda name: RemoteController(name))
# The NetASM datapath must be running before the network is started.
NetASMSwitch.start_datapath(net.switches, address="127.0.0.1", port=6633)
net.start()
if options.cli:
# Interactive mode: drop into the Mininet CLI until the user exits.
CLI(net)
else:
net.pingAll()
net.stop()
NetASMSwitch.stop_datapath()
if __name__ == '__main__':
# Tell mininet to print useful information
setLogLevel('info')
test() | gpl-2.0 |
CiscoSystems/vespa | neutron/tests/unit/ml2/test_agent_scheduler.py | 46 | 1360 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.unit.ml2 import test_ml2_plugin
from neutron.tests.unit.openvswitch import test_agent_scheduler
class Ml2AgentSchedulerTestCase(
test_agent_scheduler.OvsAgentSchedulerTestCase):
# Re-run the OVS agent-scheduler suite against the ML2 plugin.
plugin_str = test_ml2_plugin.PLUGIN_NAME
l3_plugin = ('neutron.services.l3_router.'
'l3_router_plugin.L3RouterPlugin')
class Ml2L3AgentNotifierTestCase(
test_agent_scheduler.OvsL3AgentNotifierTestCase):
# Re-run the OVS L3 agent-notifier suite against the ML2 plugin.
plugin_str = test_ml2_plugin.PLUGIN_NAME
l3_plugin = ('neutron.services.l3_router.'
'l3_router_plugin.L3RouterPlugin')
class Ml2DhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
# Re-run the OVS DHCP agent-notifier suite against the ML2 plugin.
plugin_str = test_ml2_plugin.PLUGIN_NAME
| apache-2.0 |
with-git/tensorflow | tensorflow/contrib/tensor_forest/client/eval_metrics.py | 52 | 5515 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of functions to be used as evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
INFERENCE_PROB_NAME = prediction_key.PredictionKey.PROBABILITIES
INFERENCE_PRED_NAME = prediction_key.PredictionKey.CLASSES
FEATURE_IMPORTANCE_NAME = 'global_feature_importance'
def _top_k_generator(k):
"""Returns a streaming top-``k`` accuracy metric function."""
def _top_k(probabilities, targets):
targets = math_ops.to_int32(targets)
# Metrics receive targets as (batch, 1); in_top_k wants a 1-D vector.
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, squeeze_dims=[1])
return metric_ops.streaming_mean(nn.in_top_k(probabilities, targets, k))
return _top_k
def _accuracy(predictions, targets, weights=None):
# Streaming fraction of hard predictions equal to targets.
return metric_ops.streaming_accuracy(predictions, targets, weights=weights)
def _r2(probabilities, targets, weights=None):
"""Streaming coefficient of determination (R^2) for regression."""
targets = math_ops.to_float(targets)
y_mean = math_ops.reduce_mean(targets, 0)
squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
squares_residuals = math_ops.reduce_sum(
math_ops.square(targets - probabilities), 0)
score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
return metric_ops.streaming_mean(score, weights=weights)
def _squeeze_and_onehot(targets, depth):
# (batch, 1) integer class ids -> (batch, depth) one-hot rows.
targets = array_ops.squeeze(targets, squeeze_dims=[1])
return array_ops.one_hot(math_ops.to_int32(targets), depth)
def _sigmoid_entropy(probabilities, targets, weights=None):
# Streaming mean sigmoid cross-entropy against one-hot targets.
return metric_ops.streaming_mean(
losses.sigmoid_cross_entropy(probabilities,
_squeeze_and_onehot(
targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _softmax_entropy(probabilities, targets, weights=None):
# Streaming mean sparse softmax cross-entropy (integer class targets).
return metric_ops.streaming_mean(
losses.sparse_softmax_cross_entropy(probabilities,
math_ops.to_int32(targets)),
weights=weights)
def _predictions(predictions, unused_targets, **unused_kwargs):
# Identity "metric": exposes the raw predictions tensor.
return predictions
def _class_log_loss(probabilities, targets, weights=None):
# Streaming mean log loss against one-hot targets.
return metric_ops.streaming_mean(
losses.log_loss(probabilities,
_squeeze_and_onehot(targets,
array_ops.shape(probabilities)[1])),
weights=weights)
def _precision(predictions, targets, weights=None):
# Streaming precision over hard class predictions.
return metric_ops.streaming_precision(predictions, targets, weights=weights)
def _precision_at_thresholds(predictions, targets, weights=None):
# Precision of the positive-class probability column (column 1) at
# thresholds 0.00, 0.01, ..., 0.99.
return metric_ops.streaming_precision_at_thresholds(
array_ops.slice(predictions, [0, 1], [-1, 1]),
targets,
np.arange(
0, 1, 0.01, dtype=np.float32),
weights=weights)
def _recall(predictions, targets, weights=None):
# Streaming recall over hard class predictions.
return metric_ops.streaming_recall(predictions, targets, weights=weights)
def _recall_at_thresholds(predictions, targets, weights=None):
# Recall of the positive-class probability column (column 1) at
# thresholds 0.00, 0.01, ..., 0.99.
return metric_ops.streaming_recall_at_thresholds(
array_ops.slice(predictions, [0, 1], [-1, 1]),
targets,
np.arange(
0, 1, 0.01, dtype=np.float32),
weights=weights)
def _auc(probs, targets, weights=None):
# Streaming AUC computed on the positive-class probability column.
return metric_ops.streaming_auc(array_ops.slice(probs, [0, 1], [-1, 1]),
targets, weights=weights)
# Maps public metric names to their implementation functions.
_EVAL_METRICS = {
'auc': _auc,
'sigmoid_entropy': _sigmoid_entropy,
'softmax_entropy': _softmax_entropy,
'accuracy': _accuracy,
'r2': _r2,
'predictions': _predictions,
'classification_log_loss': _class_log_loss,
'precision': _precision,
'precision_at_thresholds': _precision_at_thresholds,
'recall': _recall,
'recall_at_thresholds': _recall_at_thresholds,
'top_5': _top_k_generator(5)
}
# Maps each metric name to the prediction tensor it consumes
# (probabilities vs hard classes vs regression scores).
_PREDICTION_KEYS = {
'auc': INFERENCE_PROB_NAME,
'sigmoid_entropy': INFERENCE_PROB_NAME,
'softmax_entropy': INFERENCE_PROB_NAME,
'accuracy': INFERENCE_PRED_NAME,
'r2': prediction_key.PredictionKey.SCORES,
'predictions': INFERENCE_PRED_NAME,
'classification_log_loss': INFERENCE_PROB_NAME,
'precision': INFERENCE_PRED_NAME,
'precision_at_thresholds': INFERENCE_PROB_NAME,
'recall': INFERENCE_PRED_NAME,
'recall_at_thresholds': INFERENCE_PROB_NAME,
'top_5': INFERENCE_PROB_NAME
}
def get_metric(metric_name):
"""Given a metric name, return the corresponding metric function."""
return _EVAL_METRICS[metric_name]
def get_prediction_key(metric_name):
"""Returns the prediction key naming the tensor ``metric_name`` consumes."""
return _PREDICTION_KEYS[metric_name]
| apache-2.0 |
jac2130/BayesGame | pywebsocket-read-only/src/mod_pywebsocket/xhr_benchmark_handler.py | 76 | 3946 | # Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
from mod_pywebsocket import util
class XHRBenchmarkHandler(object):
"""Serves XHR upload (do_send) / download (do_receive) benchmark requests."""
def __init__(self, headers, rfile, wfile):
self._logger = util.get_class_logger(self)
self.headers = headers
self.rfile = rfile
self.wfile = wfile
def do_send(self):
# Benchmark upload: consume Content-Length bytes of 'a' characters in
# 1 MB chunks, then echo the byte count back to the client.
content_length = int(self.headers.getheader('Content-Length'))
self._logger.debug('Requested to receive %s bytes', content_length)
RECEIVE_BLOCK_SIZE = 1024 * 1024
bytes_to_receive = content_length
while bytes_to_receive > 0:
bytes_to_receive_in_this_loop = bytes_to_receive
if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
received_data = self.rfile.read(bytes_to_receive_in_this_loop)
# The client must send only 'a' bytes; anything else aborts
# without sending a response.
for c in received_data:
if c != 'a':
self._logger.debug('Request body verification failed')
return
bytes_to_receive -= len(received_data)
if bytes_to_receive < 0:
self._logger.debug('Received %d more bytes than expected' %
(-bytes_to_receive))
return
# Return the number of received bytes back to the client.
response_body = '%d' % content_length
self.wfile.write(
'HTTP/1.1 200 OK\r\n'
'Content-Type: text/html\r\n'
'Content-Length: %d\r\n'
'\r\n%s' % (len(response_body), response_body))
self.wfile.flush()
def do_receive(self):
# Benchmark download: request body is "<size> <mode>" where mode is
# 'chunked' or 'none'; reply with <size> bytes of 'a'.
content_length = int(self.headers.getheader('Content-Length'))
request_body = self.rfile.read(content_length)
request_array = request_body.split(' ')
if len(request_array) < 2:
self._logger.debug('Malformed request body: %r', request_body)
return
# Parse the size parameter.
bytes_to_send = request_array[0]
try:
bytes_to_send = int(bytes_to_send)
except ValueError, e:
self._logger.debug('Malformed size parameter: %r', bytes_to_send)
return
self._logger.debug('Requested to send %s bytes', bytes_to_send)
# Parse the transfer encoding parameter.
chunked_mode = False
mode_parameter = request_array[1]
if mode_parameter == 'chunked':
self._logger.debug('Requested chunked transfer encoding')
chunked_mode = True
elif mode_parameter != 'none':
self._logger.debug('Invalid mode parameter: %r', mode_parameter)
return
# Write a header
response_header = (
'HTTP/1.1 200 OK\r\n'
'Content-Type: application/octet-stream\r\n')
if chunked_mode:
response_header += 'Transfer-Encoding: chunked\r\n\r\n'
else:
response_header += (
'Content-Length: %d\r\n\r\n' % bytes_to_send)
self.wfile.write(response_header)
self.wfile.flush()
# Write a body
SEND_BLOCK_SIZE = 1024 * 1024
while bytes_to_send > 0:
bytes_to_send_in_this_loop = bytes_to_send
if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
if chunked_mode:
self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
self.wfile.write('a' * bytes_to_send_in_this_loop)
if chunked_mode:
self.wfile.write('\r\n')
self.wfile.flush()
bytes_to_send -= bytes_to_send_in_this_loop
if chunked_mode:
# A zero-length chunk terminates a chunked response.
self.wfile.write('0\r\n\r\n')
self.wfile.flush()
| mit |
philanthropy-u/edx-platform | openedx/features/journals/views/learner_dashboard.py | 4 | 2148 | """ Journal Tab of Learner Dashboard views """
from datetime import datetime, time
import logging
from django.http import Http404
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.features.journals.api import (
fetch_journal_access,
journals_enabled,
)
logger = logging.getLogger(__name__)
def journal_listing(request):
""" View a list of journals which the user has or had access to"""
user = request.user
# The journals tab only exists for logged-in users on sites with the
# journals feature enabled; otherwise pretend the page does not exist.
if not journals_enabled() or not user.is_authenticated():
raise Http404
journals = fetch_journal_access(
site=request.site,
user=request.user
)
context = {
'journals': journals,
'show_dashboard_tabs': True,
'show_program_listing': ProgramsApiConfig.is_enabled(),
'show_journal_listing': journals_enabled()
}
return render_to_response('journals/learner_dashboard/journal_dashboard.html', context)
def format_expiration_date(expiration_date):
    """
    Formats Expiration Date

    Arguments:
        expiration_date (str): ISO format 'YYYY-mm-dd' (April 26, 2018 is '2018-04-26')

    Returns:
        str: formatted as 'Mmm dd YYYY' (April 26, 2018 is 'Apr 26 2018')
    """
    expires_on = datetime.strptime(expiration_date, '%Y-%m-%d').date()
    # Access lasts through the final second of the expiry day.
    last_valid_moment = datetime.combine(date=expires_on, time=time.max)
    return last_valid_moment.strftime("%b %d %Y")
def has_access_expired(expiration_date):
    """
    Returns true if it is now past the expiration date.

    Arguments:
        expiration_date (str): ISO format 'YYYY-mm-dd' (April 26, 2018 is '2018-04-26')

    Returns:
        bool: True if access has expired
    """
    expires_on = datetime.strptime(expiration_date, '%Y-%m-%d').date()
    # Access remains valid through the final second of the expiry day.
    last_valid_moment = datetime.combine(date=expires_on, time=time.max)
    return datetime.today() > last_valid_moment
| agpl-3.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/PIL/ImagePalette.py | 7 | 5792 | #
# The Python Imaging Library.
# $Id$
#
# image palette object
#
# History:
# 1996-03-11 fl Rewritten.
# 1997-01-03 fl Up and running.
# 1997-08-23 fl Added load hack
# 2001-04-16 fl Fixed randint shadow bug in random()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import array
from PIL import Image, ImageColor
class ImagePalette:
"Color palette for palette mapped images"
def __init__(self, mode = "RGB", palette = None):
self.mode = mode
self.rawmode = None # if set, palette contains raw data
# Default palette is a linear greyscale ramp replicated for each band.
self.palette = palette or list(range(256))*len(self.mode)
self.colors = {}
self.dirty = None
if len(self.mode)*256 != len(self.palette):
raise ValueError("wrong palette size")
def getdata(self):
"""
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive.
.. warning:: This method is experimental.
"""
if self.rawmode:
return self.rawmode, self.palette
return self.mode + ";L", self.tobytes()
def tobytes(self):
"""Convert palette to bytes.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(self.palette, bytes):
return self.palette
arr = array.array("B", self.palette)
if hasattr(arr, 'tobytes'):
#py3k has a tobytes, tostring is deprecated.
return arr.tobytes()
return arr.tostring()
# Declare tostring as an alias for tobytes
tostring = tobytes
def getcolor(self, color):
"""Given an rgb tuple, allocate palette entry.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(color, tuple):
try:
return self.colors[color]
except KeyError:
# allocate new color slot
if isinstance(self.palette, bytes):
self.palette = [int(x) for x in self.palette]
index = len(self.colors)
if index >= 256:
raise ValueError("cannot allocate more than 256 colors")
self.colors[color] = index
# Channels are stored planar: R at 0-255, G at 256-511, B at 512-767.
self.palette[index] = color[0]
self.palette[index+256] = color[1]
self.palette[index+512] = color[2]
self.dirty = 1
return index
else:
raise ValueError("unknown color specifier: %r" % color)
def save(self, fp):
"""Save palette to text file.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(fp, str):
fp = open(fp, "w")
fp.write("# Palette\n")
fp.write("# Mode: %s\n" % self.mode)
# One line per index: "<index> <value for each band>".
for i in range(256):
fp.write("%d" % i)
for j in range(i, len(self.palette), 256):
fp.write(" %d" % self.palette[j])
fp.write("\n")
fp.close()
# --------------------------------------------------------------------
# Internal
def raw(rawmode, data):
    """Build an ImagePalette that wraps raw, mode-specific palette data."""
    wrapper = ImagePalette()
    wrapper.rawmode = rawmode
    wrapper.palette = data
    wrapper.dirty = 1
    return wrapper
# --------------------------------------------------------------------
# Factories
def _make_linear_lut(black, white):
lut = []
if black == 0:
for i in range(256):
lut.append(white*i//255)
else:
raise NotImplementedError # FIXME
return lut
def _make_gamma_lut(exp, mode="RGB"):
lut = []
for i in range(256):
lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))
return lut
def new(mode, data):
# Thin wrapper over the C core's palette constructor.
return Image.core.new_palette(mode, data)
def negative(mode="RGB"):
    """Palette mapping each channel value v to 255 - v."""
    return ImagePalette(mode, list(range(255, -1, -1)) * len(mode))
def random(mode="RGB"):
    """Palette filled with uniformly random channel values."""
    from random import randint
    values = [randint(0, 255) for _ in range(256 * len(mode))]
    return ImagePalette(mode, values)
def sepia(white="#fff0c0"):
    """Palette ramping each channel from black up to the given white point."""
    rgb = ImageColor.getrgb(white)
    ramps = [_make_linear_lut(0, channel) for channel in rgb]
    return ImagePalette("RGB", ramps[0] + ramps[1] + ramps[2])
def wedge(mode="RGB"):
# Identity ramp 0..255 replicated across all bands.
return ImagePalette(mode, list(range(256)) * len(mode))
def load(filename):
# FIXME: supports GIMP gradients only
# Tries each known palette file format in turn; the first parser that
# does not raise wins.
fp = open(filename, "rb")
lut = None
if not lut:
try:
from PIL import GimpPaletteFile
fp.seek(0)
p = GimpPaletteFile.GimpPaletteFile(fp)
lut = p.getpalette()
except (SyntaxError, ValueError):
#import traceback
#traceback.print_exc()
pass
if not lut:
try:
from PIL import GimpGradientFile
fp.seek(0)
p = GimpGradientFile.GimpGradientFile(fp)
lut = p.getpalette()
except (SyntaxError, ValueError):
#import traceback
#traceback.print_exc()
pass
if not lut:
try:
from PIL import PaletteFile
fp.seek(0)
p = PaletteFile.PaletteFile(fp)
lut = p.getpalette()
except (SyntaxError, ValueError):
# NOTE(review): unlike the branches above, this one prints the
# traceback unconditionally; presumably left enabled by accident.
import traceback
traceback.print_exc()
pass
if not lut:
raise IOError("cannot load palette")
return lut # data, rawmode
| mit |
wuxianghou/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_nc_test.py | 277 | 3758 | #!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Negative compilation test for Google Test."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import unittest
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
if not IS_LINUX:
sys.exit(0) # Negative compilation tests are not supported on Windows & Mac.
class GTestNCTest(unittest.TestCase):
"""Negative compilation test for Google Test."""
def testCompilerError(self):
"""Verifies that erroneous code leads to expected compiler
messages."""
# Defines a list of test specs, where each element is a tuple
# (test name, list of regexes for matching the compiler errors).
# A spec whose regex list is None is expected to compile cleanly.
test_specs = [
('CANNOT_IGNORE_RUN_ALL_TESTS_RESULT',
[r'ignoring return value']),
('USER_CANNOT_INCLUDE_GTEST_INTERNAL_INL_H',
[r'must not be included except by Google Test itself']),
('CATCHES_DECLARING_SETUP_IN_TEST_FIXTURE_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_CALLING_SETUP_IN_TEST_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_DECLARING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_CALLING_SETUP_IN_ENVIRONMENT_WITH_TYPO',
[r'Setup_should_be_spelled_SetUp']),
('CATCHES_WRONG_CASE_IN_TYPED_TEST_P',
[r'BarTest.*was not declared']),
('CATCHES_WRONG_CASE_IN_REGISTER_TYPED_TEST_CASE_P',
[r'BarTest.*was not declared']),
('CATCHES_WRONG_CASE_IN_INSTANTIATE_TYPED_TEST_CASE_P',
[r'BarTest.*not declared']),
('CATCHES_INSTANTIATE_TYPED_TESET_CASE_P_WITH_SAME_NAME_PREFIX',
[r'redefinition of.*My.*FooTest']),
('STATIC_ASSERT_TYPE_EQ_IS_NOT_A_TYPE',
[r'StaticAssertTypeEq.* does not name a type']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_NAMESPACE',
[r'StaticAssertTypeEq.*int.*const int']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_CLASS',
[r'StaticAssertTypeEq.*int.*bool']),
('STATIC_ASSERT_TYPE_EQ_WORKS_IN_FUNCTION',
[r'StaticAssertTypeEq.*const int.*int']),
('SANITY',
None)
]
# TODO(wan@google.com): verify that the test specs are satisfied.
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
NoahFlowa/glowing-spoon | venv/lib/python2.7/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | 6090 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
# A per-call timeout overrides the instance-wide default.
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if time.time() > end_time:
if timeout is not None and timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
# Poll at timeout/10, or every 100ms when no positive timeout.
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

        Read and return the numeric PID recorded as text in the named
        PID file. If the PID file cannot be read, or if the content is
        not a valid PID, return ``None``.

        """
    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        return None
    try:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character.
        #
        #   Programs that read PID files should be somewhat flexible
        #   in what they accept; i.e., they should ignore extra
        #   whitespace, leading zeroes, absence of the trailing
        #   newline, or additional lines in the PID file.
        line = pidfile.readline().strip()
        try:
            return int(line)
        except ValueError:
            # Content is not a valid decimal PID.
            return None
    finally:
        # Close the handle even when reading raises; the original version
        # leaked the descriptor if readline() failed.
        pidfile.close()
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

        Get the numeric process ID ("PID") of the current process
        and write it to the named file as a line of text.

        """
    # O_EXCL makes creation atomic: fails with EEXIST if the file exists.
    flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    fd = os.open(pidfile_path, flags, 0o644)
    pidfile = os.fdopen(fd, 'w')
    # FHS 2.3: a PID file holds the ASCII decimal PID plus a newline;
    # e.g. process 25 stores the three characters "25\n".
    pidfile.write("%s\n" % os.getpid())
    pidfile.close()
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

        Removing a PID file that doesn't already exist puts us in the
        desired state, so we ignore the condition if the file does not
        exist.

        """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        # Absence already satisfies the postcondition; re-raise anything else.
        if exc.errno != errno.ENOENT:
            raise
| apache-2.0 |
unicri/edx-platform | common/lib/xmodule/xmodule/tests/xml/test_policy.py | 248 | 1262 | """
Tests that policy json files import correctly when loading XML
"""
from nose.tools import assert_equals, assert_raises # pylint: disable=no-name-in-module
from xmodule.tests.xml.factories import CourseFactory
from xmodule.tests.xml import XModuleXmlImportTest
class TestPolicy(XModuleXmlImportTest):
"""
Tests that policy json files import correctly when loading xml
"""
def test_no_attribute_mapping(self):
# Policy files are json, and thus the values aren't passed through 'deserialize_field'
# Therefore, the string 'null' is passed unchanged to the Float field, which will trigger
# a ValueError
with assert_raises(ValueError):
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 'null'}))
# Trigger the exception by looking at the imported data
course.days_early_for_beta # pylint: disable=pointless-statement
def test_course_policy(self):
# JSON null maps to Python None and must be accepted by the field.
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': None}))
assert_equals(None, course.days_early_for_beta)
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 9}))
assert_equals(9, course.days_early_for_beta)
| agpl-3.0 |
jadnohra/handle_file | lztex.py | 1 | 18515 | import os,sys
import copy, json
import re
g_dbg = '-dbg' in sys.argv or False
g_force_keep_indent = '-force_keep_indent' in sys.argv
g_kill_indent = True
g_enable_lzmath = False if '-no_lzmath' in sys.argv else True
g_re1 = re.compile(r"(\\)([A-Z])\b")
g_re1_subst = '\mathbb{\\2}'
g_re2 = re.compile(r"(])([A-Z])\b")
g_re2_subst = '\mathcal{\\2}'
try:
import mako.template as mako_temp
except ImportError:
mako_temp = None
pass
# ANSI/VT100 escape codes keyed by color name; names with a 'b' prefix are
# background colors, and '' / 'default' reset to the terminal default.
k_vt_col_map = { '':'\x1b[0m', 'default':'\x1b[0m', 'black':'\x1b[30m', 'red':'\x1b[31m', 'green':'\x1b[32m', 'yellow':'\x1b[33m',
'blue':'\x1b[34m', 'magenta':'\x1b[35m', 'cyan':'\x1b[36m', 'white':'\x1b[37m',
'bdefault':'\x1b[49m', 'bblack':'\x1b[40m', 'bred':'\x1b[41m', 'bgreen':'\x1b[42m', 'byellow':'\x1b[43m',
'bblue':'\x1b[44m', 'bmagenta':'\x1b[45m', 'bcyan':'\x1b[46m', 'bwhite':'\x1b[47m' }
vt_cm = k_vt_col_map
def set_vt_col(col):
# Emit the escape code for ``col`` directly; raises KeyError on bad names.
sys.stdout.write(k_vt_col_map[col])
def unistr(str):
# Py2 helper: decode byte strings as UTF-8, pass unicode through unchanged.
# (Parameter deliberately shadows the builtin ``str``; kept as-is.)
if not isinstance(str, unicode):
return unicode(str, "utf-8")
return str
largv = []


def largv_has(keys):
    """Return True if any entry of *keys* is present in the global ``largv``."""
    return any(key in largv for key in keys)
def largv_has_key(keys):
    """Return True if some key in *keys* occurs in ``largv`` followed by a value."""
    for key in keys:
        if key not in largv:
            continue
        if largv.index(key) + 1 < len(largv):
            return True
    return False
def largv_get(keys, dflt):
    """Return the value following the first key of *keys* found in ``largv``.

    Falls back to *dflt* when no key is found, when the key is the last
    element (no value after it), or when ``sys.argv`` is absent (e.g. an
    embedded interpreter).
    """
    if hasattr(sys, 'argv'):
        for key in keys:
            if key in largv:
                ki = largv.index(key)
                if ki + 1 < len(largv):
                    return largv[ki + 1]
    # Previously the dflt fallback was guarded by the hasattr check, so the
    # function implicitly returned None when sys.argv was missing.
    return dflt
def largv_geti(i, dflt):
    """Return the positional argument at index *i*, or *dflt* when out of range."""
    return largv[i] if i < len(largv) else dflt
def tex_escape(text):
    """
    :param text: a plain text message
    :return: the message escaped to appear correctly in LaTeX
    """
    conv = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\textasciitilde{}',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
        # FIX: the trailing {} terminates the macro name; without it LaTeX
        # reads '<x' as the undefined command \textlessx.
        '<': r'\textless{}',
        '>': r'\textgreater{}',
    }
    # Longest keys first so multi-character sequences would win over their
    # prefixes (all keys are currently single characters, but keep it safe).
    regex = re.compile('|'.join(re.escape(key)
                                for key in sorted(conv.keys(), key=lambda item: -len(item))))
    return regex.sub(lambda match: conv[match.group()], text)
def main():
global g_kill_indent
k_meta_titles = ['ignore', 'bib']
k_titles = ['keep', 'list', ':list', 'llist', ':llist', 'bullets', ':bullets', 'bbullets', ':bbullets', 'cases', ':cases', "eqn", "eqns" ,"eqns*", 'mm', 'm', 'notes', 'sections',
'copy', 'tex', 'table', 'mtable', 'par', 'underline', 'footnote', 'foot', 'say', 'quote', 'footurl', 'onecol', 'tex_esc', 'href', 'url', ':colors', 'cite_all']
k_titles2 = ['table', 'mtable', 'bullets', 'list', 'color', 'mark']
def get_title(ptitle, out):
if ptitle in k_titles:
out[0] = ptitle; out[1] = ''; return True;
elif ptitle.split(' ')[0] in k_titles2:
ptitle_splt = ptitle.split()
out[0] = ptitle_splt[0]; out[1] = ' '.join(ptitle_splt[1:]); out[2] = {};
if '%%' in out[1]:
params = '%%'.join(out[1].split('%%')[1:])
out[2] = json.loads(params)
return True
out[0] = ''; out[1] = ''; out[2] = {};
return False
bib_out_lines = []
ifp = largv[1]
lvl_lines = []; file_start = True;
li = 0
with open(ifp, "r") as ifile:
ignore_depth = -1; ignore_is_bib = False;
for line in ifile.readlines():
li = li + 1
if file_start and (line.startswith('#') or line.strip() == ''):
pass
else:
if '\t \t' in line:
set_vt_col('yellow'); print 'Warning: space between tabs at line {}: "{}..."'.format(li, line.strip()[:5])
file_start = False
lvli = 0
while lvli < len(line) and line[lvli] == "\t":
lvli = lvli+1
lvl_content = line[lvli:].rstrip()
if ignore_depth == -1 or lvli <= ignore_depth:
if lvl_content not in k_meta_titles:
ignore_depth = -1
lvl_lines.append([lvli, lvl_content, li])
else:
ignore_depth = lvli; ignore_is_bib = (lvl_content == 'bib');
else:
if ignore_is_bib:
bib_out_lines.append(lvl_content)
bib_cite_ids = []
if len(bib_out_lines):
bib_ofp = '{}{}'.format(os.path.splitext(ifp)[0], '.bib')
bib_fout = open(bib_ofp, 'w+')
bib_fout.write('% Auto-generated by lztex from [{}]\n\n'.format(os.path.split(ifp)[1]))
bib_fout.write('\n'.join(bib_out_lines))
regex = r"(\s|^)@.+{(.+),"
for bib_line in bib_out_lines:
matches = re.finditer(regex, bib_line)
for match in matches:
bib_cite_ids.append(match.groups()[1])
#print lvl_lines
def print_node_tree(node):
def rep_parent(node):
node['parent'] = node['parent']['line'] if node['parent'] else -1
for n in node['children']:
rep_parent(n)
nnode = copy.deepcopy(node)
rep_parent(nnode)
print json.dumps(nnode, indent=1)
def add_lines_recurse(rel_node, lvl_lines, i):
def print_err():
set_vt_col('red'); print 'Error: indent at line {}: "{}..."'.format(line[2], line[1][:5])
line = lvl_lines[i]
lvl_diff = line[0]-rel_node['lvl']
title_mark = ''
rec_parent = None
if lvl_diff > 0 and rel_node['title'] == 'copy':
parent = rel_node
title_info = ['', '', '']
nrel_node = parent
else:
while lvl_diff < 0:
rel_node = rel_node['parent']; lvl_diff = lvl_diff+1;
if rel_node == None:
print_err(); return False;
parent = rel_node['parent'] if lvl_diff == 0 else (rel_node if lvl_diff == 1 else None)
if parent != None and parent['title'] in ['keep','mark']:
parent = parent['parent']
title_mark = parent['title_opts']
title_info = ['', '', '']
get_title(line[1], title_info)
nrel_node = None
if parent == None:
print_err(); return False;
node_title = title_info[0]
if node_title == '':
if parent['title'] in ['llist', 'bbullets']:
rec_parent = parent
if parent['rec_parent'] is not None:
rec_parent = parent['rec_parent']
line_node = {'parent':parent, 'rec_parent':rec_parent, 'line': line[2], 'lvl':line[0], 'content':line[1], 'children':[], 'title':node_title, 'title_opts':title_info[1], 'title_params':title_info[2], 'title_mark':title_mark, 'lvl_state':{}}
if parent['title'] in ['table', 'mtable']:
is_sep = line_node['content'] in ['-', '--']
is_ext_sep = any([line_node['content'].startswith(x) for x in ['- ', '-- ']])
if is_sep or is_ext_sep:
line_node['table_row_sep'] = True
if is_ext_sep:
line_node['title_opts'] = ' '.join(line_node['content'].split(' ')[1:])
if len(parent['children']):
parent['children'][-1]['table_last_row'] = True
if line_node['title'] not in ['keep', 'mark']:
parent['children'].append(line_node)
if i+1 < len(lvl_lines):
if add_lines_recurse(nrel_node if nrel_node else line_node, lvl_lines, i+1) == False:
return False
if line_node['title'] in ['table', 'mtable']:
if len(line_node['children']):
line_node['children'][-1]['table_last_row'] = True
if parent['title'] == 'cases':
parent['children'][-1]['case_last_row'] = True
return True
root_node = {'parent':None, 'rec_parent':None, 'line':-1, 'lvl':-1, 'children':[], 'title':'_root', 'title_opts':'', 'title_params':{}, 'lvl_state':{}}
add_lines_recurse(root_node, lvl_lines, 0)
#print_node_tree(root_node)
fout = sys.stdout
if largv_has(['-o']):
ofp = largv_get(['-o'], '')
if g_force_keep_indent == False and (os.path.splitext(ofp)[1] == '.md'): # Markdown will treat tabbed text as verbatim, we don't want this.
g_kill_indent = True
fout = open(ofp, 'w+')
def do_lzmath(strng):
def do_rep(strng):
sub = strng
sub = re.sub(g_re1, g_re1_subst, sub)
sub = re.sub(g_re2, g_re2_subst, sub)
return sub
def do_split_2(strng, markers, reps):
splt = strng.split(markers[0])
strng1 = ''
is_open = False
for i in range(len(splt)):
is_open = (i%2 == 1)
if is_open:
end_splt = splt[i].split(markers[1])
strng1 = strng1 + reps[0] + do_rep(end_splt[0]) + reps[1] + (end_splt[1] if len(end_splt) > 1 else '')
else:
strng1 = strng1 + splt[i]
return strng1
def do_split_1(strng, marker, reps):
splt = strng.split(marker)
strng1 = ''
is_open = False
for i in range(len(splt)):
is_open = (i%2 == 1)
if is_open:
xfm = do_split_2( do_split_2(splt[i], ('{', '}'), ('\\{','\\}')), (' ', ' '), ('{','}'))
strng1 = strng1 + reps[0] + do_rep(xfm) + reps[1]
else:
strng1 = strng1 + splt[i]
return strng1
if g_enable_lzmath:
strng1 = do_split_1(strng, '\t\t', (' $$', '$$ '))
strng2 = do_split_1(strng1, '\t', (' $', '$ '))
return strng2
else:
return strng
def indented_str(node, lvl_state, strng):
is_copy_node = (lvl_state['title_node'] is not None) and (lvl_state['title_node']['title'] == 'copy')
if is_copy_node == False:
if (g_kill_indent):
return strng
else:
return ''.join(['\t']*node['lvl']) + strng
else:
return ''.join(['\t']*(node['lvl']-1)) + strng
def begin_line(lvl, node, lvl_state):
line = node['content']
if lvl['title'] == 'sections':
print >>fout, '\n', ''.join(['#']*(lvl_state.get('section', 0))),
elif lvl['title'] == 'notes':
print >>fout, indented_str(node, lvl_state, '\\begin{note}')
elif lvl['title'] == 'href':
print >>fout, indented_str(node, lvl_state, '{'),
elif (lvl['title'] in ['list', 'bullets', 'llist', 'bbullets']) or (lvl['rec_parent'] is not None and lvl['rec_parent']['title'] in ['llist', 'bbullets']):
print >>fout, indented_str(node, lvl_state, '\\item')
#elif lvl['title'] == 'mtable':
# print >>fout, indented_str(node, lvl_state, '$'),
def end_line(lvl, node, lvl_state):
line = node['content']
if lvl['title'] == 'notes':
print >>fout, indented_str(node, lvl_state, '\\end{note}')
elif lvl['title'] in ['table', 'mtable']:
if node.get('table_row_sep', False):
lvl_state['row_cnt'] = lvl_state['row_cnt'] + 1
lvl_state['col_cnt'] = 0
sep_tex = ''
if line.startswith('--'):
sep_tex = lvl['title_params'].get('--', '\\\\ \hline')
else:
sep_tex = lvl['title_params'].get('-', '\\\\')
print >>fout, indented_str(node, lvl_state, '{} {}'.format(sep_tex, node['title_opts']))
else:
if node.get('table_last_row', False) == False:
print >>fout, indented_str(node, lvl_state, '& '),
lvl_state['col_cnt'] = lvl_state['col_cnt'] + 1
#if lvl['title'] == 'mtable':
# print >>fout, indented_str(node, lvl_state, '$')
elif lvl['title'] == 'cases':
if node.get('cases_last_row', False):
print >>fout, indented_str(node, lvl_state, '\\\\ '),
elif lvl['title'] == 'href':
print >>fout, indented_str(node, lvl_state, '}'),
def do_line(lvl, node, lvl_state, glob_state):
def print_content(node, lvl_state, strng, line_ret = True, enable_lzmath = False):
if strng != 'blank':
print >>fout, indented_str(node, lvl_state, do_lzmath(strng) if enable_lzmath else strng),
if line_ret:
print >>fout, ''
if lvl['title'].startswith(':'):
tag = lvl['title'][1:]
if 'tag' == 'colors':
if tag in glob_state['settings']:
glob_state['settings'][tag].append(node['content'])
else:
glob_state['settings'][tag] = [node['content']]
else:
glob_state['settings'][tag] = node['content']
else:
if lvl['title'] in ['table', 'mtable']:
if node.get('table_row_sep', False):
return
elif lvl['title'] in ['tex_esc', 'footurl', 'say', 'url']:
print_content(node, lvl_state, tex_escape(node['content']))
return
elif lvl['title'] in ['quote']:
print_content(node, lvl_state, node['content'].replace('- ', ''))
return
print_content(node, lvl_state, node['content'], lvl['title'] not in ['href'], True)
def begin_lvl(lvl, lvl_state, title_node, glob_state):
def get_title_opt(lvl):
opt = lvl.get('title_opts', '')
return opt if len(opt) else glob_state['settings'].get(lvl['title'],'')
lvl_state['title_node'] = title_node # TODO, do this during pre-processing, add it to lvl instead of lvl_state
if lvl['title'] == 'sections':
glob_state['section'] = glob_state.get('section', 0)+1
lvl_state['section'] = glob_state['section']
elif lvl['title'] in ['list', 'llist']:
print >>fout, indented_str(lvl, lvl_state, '{} {}'.format('\\begin{enumerate}', get_title_opt(lvl) ))
elif lvl['title'] in ['bullets', 'bbullets']:
print >>fout, indented_str(lvl, lvl_state, '{} {}'.format('\\begin{itemize}', get_title_opt(lvl) ))
elif lvl['title'] == 'cases':
print >>fout, indented_str(lvl, lvl_state, '{} {}'.format('\\begin{cases}', get_title_opt(lvl) ))
elif lvl['title'] == 'onecol':
print >>fout, indented_str(lvl, lvl_state, '{} {}'.format('\\begin{strip}', lvl.get('title_opts', '')))
elif lvl['title'] == 'eqns*':
print >>fout, indented_str(lvl, lvl_state, '\\begin{align*}')
elif lvl['title'] == 'eqns':
print >>fout, indented_str(lvl, lvl_state, '\\begin{align}')
elif lvl['title'] == 'eqn':
print >>fout, indented_str(lvl, lvl_state, '\\begin{equation}')
elif lvl['title'] == 'mm':
print >>fout, indented_str(lvl, lvl_state, '$$')
elif lvl['title'] == 'm':
print >>fout, indented_str(lvl, lvl_state, '$')
elif lvl['title'] in ['table', 'mtable']:
lvl_state['row_cnt'] = 0; lvl_state['col_cnt'] = 0;
if any([x in lvl['title_params'] for x in ['col', 'row']]):
lvl_state['has_group'] = True
print >>fout, indented_str(lvl, lvl_state, '\\begingroup')
if 'row' in lvl['title_params']:
row_cmd = '\\renewcommand{{\\arraystretch}}{{ {} }}'.format(lvl['title_params']['row'])
print >>fout, indented_str(lvl, lvl_state, row_cmd)
if 'col' in lvl['title_params']:
col_cmd = '\\setlength{{\\tabcolsep}}{{ {} }}'.format(lvl['title_params']['col'])
print >>fout, indented_str(lvl, lvl_state, col_cmd)
print >>fout, indented_str(lvl, lvl_state, '\\begin{tabular}'),
print >>fout, lvl['title_opts']
elif lvl['title'] == 'tex':
print >>fout, indented_str(lvl, lvl_state, '\\begin{identity}')
elif lvl['title'] == 'par':
print >>fout, indented_str(lvl, lvl_state, '\\par')
elif lvl['title'] in ['footnote', 'foot']:
print >>fout, indented_str(lvl, lvl_state, '\\footnote{')
elif lvl['title'] == 'footurl':
print >>fout, indented_str(lvl, lvl_state, '\\footnote{\\url{')
elif lvl['title'] in ['say']:
print >>fout, indented_str(lvl, lvl_state, '\\say{')
elif lvl['title'] in ['underline']:
print >>fout, indented_str(lvl, lvl_state, '\\ul{')
elif lvl['title'] in ['quote']:
print >>fout, indented_str(lvl, lvl_state, '\\begin{quote}')
elif lvl['title'] == 'href':
print >>fout, indented_str(lvl, lvl_state, '\\href'),
elif lvl['title'] == 'url':
print >>fout, indented_str(lvl, lvl_state, '\\url{'),
elif lvl['title'] == 'color':
colors = lvl['title_opts'].split(' ')
cmd_fore = ('\\color{{{}}}' if '{' not in colors[0] else '\\color{}').format(colors[0])
cmd_back = '\\colorbox{{{}}}'.format(colors[1] if '{' not in colors[1] else colors[1]) if (len(colors) >= 2) else ''
cmd_par = '\\parbox{0.9\\textwidth}' if len(cmd_back) else ''
print >>fout, indented_str(lvl, lvl_state, '\\begingroup {}{}{{{}'.format(cmd_fore, cmd_back, cmd_par))
elif lvl['title'] == 'keep':
1
elif lvl['title'] == 'cite_all':
if len(bib_cite_ids):
cite_list = ', '.join(['@{}'.format(x) for x in bib_cite_ids])
print >>fout, '--- \nnocite: |\n {}\n--- \n'.format(cite_list)
elif lvl['title'] == '' and len(lvl['children']) > 0:
if lvl['rec_parent'] is not None:
if lvl['rec_parent']['title'] == 'llist':
print >>fout, indented_str(lvl['rec_parent'], lvl_state, '{} {}'.format('\\begin{enumerate}', get_title_opt(lvl['rec_parent']) ))
elif lvl['rec_parent']['title'] == 'bbullets':
print >>fout, indented_str(lvl['rec_parent'], lvl_state, '{} {}'.format('\\begin{itemize}', get_title_opt(lvl['rec_parent']) ))
def end_lvl(lvl, lvl_state, glob_state):
if lvl['title'] == 'sections':
glob_state['section'] = glob_state.get('section', 0)-1
elif lvl['title'] in ['list', 'llist']:
print >>fout, indented_str(lvl, lvl_state, '\\end{enumerate}')
elif lvl['title'] in ['bullets', 'bbullets']:
print >>fout, indented_str(lvl, lvl_state, '\\end{itemize}')
elif lvl['title'] == 'cases':
print >>fout, indented_str(lvl, lvl_state, '\\end{cases}')
elif lvl['title'] == 'onecol':
print >>fout, indented_str(lvl, lvl_state, '\\end{strip}')
elif lvl['title'] == 'eqns*':
print >>fout, indented_str(lvl, lvl_state, '\\end{align*}')
elif lvl['title'] == 'eqns':
print >>fout, indented_str(lvl, lvl_state, '\\end{align}')
elif lvl['title'] == 'eqn':
print >>fout, indented_str(lvl, lvl_state, '\\end{equation}')
elif lvl['title'] == 'mm':
print >>fout, indented_str(lvl, lvl_state, '$$')
elif lvl['title'] == 'm':
print >>fout, indented_str(lvl, lvl_state, '$')
elif lvl['title'] in ['table', 'mtable']:
print >>fout, indented_str(lvl, lvl_state, '\\end{tabular}')
if lvl_state.get('has_group', False):
print >>fout, indented_str(lvl, lvl_state, '\\endgroup')
elif lvl['title'] == 'tex':
print >>fout, indented_str(lvl, lvl_state, '\\end{identity}')
elif lvl['title'] in ['footnote', 'foot']:
print >>fout, indented_str(lvl, lvl_state, '}')
elif lvl['title'] in ['say']:
print >>fout, indented_str(lvl, lvl_state, '}')
elif lvl['title'] in ['underline']:
print >>fout, indented_str(lvl, lvl_state, '}')
elif lvl['title'] in ['quote']:
print >>fout, indented_str(lvl, lvl_state, '\\end{quote}')
elif lvl['title'] == 'footurl':
print >>fout, indented_str(lvl, lvl_state, '}}')
elif lvl['title'] == 'url':
print >>fout, indented_str(lvl, lvl_state, '}')
elif lvl['title'] == 'color':
print >>fout, indented_str(lvl, lvl_state, '} \\endgroup')
elif lvl['title'] == 'keep':
1
elif lvl['title'] == '' and len(lvl['children']) > 0:
if lvl['rec_parent'] is not None:
if lvl['rec_parent']['title'] == 'llist':
print >>fout, indented_str(lvl['rec_parent'], lvl_state, '\\end{enumerate}')
elif lvl['rec_parent']['title'] == 'bbullets':
print >>fout, indented_str(lvl['rec_parent'], lvl_state, '\\end{itemize}')
def process_nodes_recurse(node, title_node, glob_state):
lvl_state = node['lvl_state']
begin_lvl(node, lvl_state, title_node, glob_state)
for cn in node['children']:
begin_line(node, cn, lvl_state)
if cn['title'] == '':
do_line(node, cn, lvl_state, glob_state)
process_nodes_recurse(cn, title_node if cn['title'] == '' else cn, glob_state)
end_line(node, cn, lvl_state)
end_lvl(node, lvl_state, glob_state)
glob_state = { 'settings':{} }
process_nodes_recurse(root_node, None, glob_state)
# Script entry point: capture argv and run the converter.
# NOTE(review): this runs unconditionally on import; consider guarding with
# `if __name__ == '__main__':` if the module is ever imported.
largv = sys.argv
main()
| unlicense |
bdfoster/blumate | blumate/components/sensor/forecast.py | 1 | 9411 | """
Support for Forecast.io weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.forecast/
"""
import logging
from datetime import timedelta
from blumate.const import CONF_API_KEY, TEMP_CELSIUS
from blumate.helpers.entity import Entity
from blumate.util import Throttle
# Third-party client library pulled in by the platform loader.
REQUIREMENTS = ['python-forecastio==1.3.4']
_LOGGER = logging.getLogger(__name__)

# Sensor types are defined like so:
# Name, si unit, us unit, ca unit, uk unit, uk2 unit
SENSOR_TYPES = {
    'summary': ['Summary', None, None, None, None, None],
    'minutely_summary': ['Minutely Summary', None, None, None, None, None],
    'hourly_summary': ['Hourly Summary', None, None, None, None, None],
    'daily_summary': ['Daily Summary', None, None, None, None, None],
    'icon': ['Icon', None, None, None, None, None],
    'nearest_storm_distance': ['Nearest Storm Distance',
                               'km', 'm', 'km', 'km', 'm'],
    'nearest_storm_bearing': ['Nearest Storm Bearing',
                              '°', '°', '°', '°', '°'],
    'precip_type': ['Precip', None, None, None, None, None],
    'precip_intensity': ['Precip Intensity', 'mm', 'in', 'mm', 'mm', 'mm'],
    'precip_probability': ['Precip Probability', '%', '%', '%', '%', '%'],
    'temperature': ['Temperature', '°C', '°F', '°C', '°C', '°C'],
    'apparent_temperature': ['Apparent Temperature',
                             '°C', '°F', '°C', '°C', '°C'],
    'dew_point': ['Dew point', '°C', '°F', '°C', '°C', '°C'],
    'wind_speed': ['Wind Speed', 'm/s', 'mph', 'km/h', 'mph', 'mph'],
    'wind_bearing': ['Wind Bearing', '°', '°', '°', '°', '°'],
    'cloud_cover': ['Cloud Coverage', '%', '%', '%', '%', '%'],
    'humidity': ['Humidity', '%', '%', '%', '%', '%'],
    'pressure': ['Pressure', 'mbar', 'mbar', 'mbar', 'mbar', 'mbar'],
    'visibility': ['Visibility', 'km', 'm', 'km', 'km', 'm'],
    'ozone': ['Ozone', 'DU', 'DU', 'DU', 'DU', 'DU'],
}

# Return cached results if last scan was less then this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Forecast.io sensor.

    Validates coordinates and API key, then registers one ForeCastSensor per
    entry in config['monitored_conditions']. Returns False on setup failure.
    """
    import forecastio

    if None in (hass.config.latitude, hass.config.longitude):
        _LOGGER.error("Latitude or longitude not set in Home Assistant config")
        return False
    try:
        # Throwaway request to validate the API key and coordinates up front.
        forecast = forecastio.load_forecast(config.get(CONF_API_KEY, None),
                                            hass.config.latitude,
                                            hass.config.longitude)
        forecast.currently()
    except ValueError:
        _LOGGER.error(
            "Connection error "
            "Please check your settings for Forecast.io.")
        return False

    # Unit system: explicit config wins, otherwise derived from HA temperature unit.
    if 'units' in config:
        units = config['units']
    elif hass.config.temperature_unit == TEMP_CELSIUS:
        units = 'si'
    else:
        units = 'us'

    # Single throttled fetcher shared by every sensor entity.
    data = ForeCastData(config.get(CONF_API_KEY, None),
                        hass.config.latitude,
                        hass.config.longitude,
                        units)
    dev = []
    for variable in config['monitored_conditions']:
        if variable not in SENSOR_TYPES:
            _LOGGER.error('Sensor type: "%s" does not exist', variable)
        else:
            dev.append(ForeCastSensor(data, variable))
    add_devices(dev)
# pylint: disable=too-few-public-methods
class ForeCastSensor(Entity):
    """Implementation of a Forecast.io sensor."""

    def __init__(self, weather_data, sensor_type):
        """Initialize the sensor.

        :param weather_data: shared ForeCastData instance (throttled fetcher).
        :param sensor_type: key into SENSOR_TYPES selecting which reading
            this entity exposes.
        """
        self.client_name = 'Weather'
        self._name = SENSOR_TYPES[sensor_type][0]
        self.forecast_client = weather_data
        self.type = sensor_type
        self._state = None
        self._unit_system = self.forecast_client.unit_system
        # SENSOR_TYPES columns: 0=name, 1=si, 2=us, 3=ca, 4=uk, 5=uk2.
        # NOTE(review): an unrecognised unit system leaves
        # self._unit_of_measurement unset; presumably the API only ever
        # reports one of these five -- confirm.
        if self._unit_system == 'si':
            self._unit_of_measurement = SENSOR_TYPES[self.type][1]
        elif self._unit_system == 'us':
            self._unit_of_measurement = SENSOR_TYPES[self.type][2]
        elif self._unit_system == 'ca':
            self._unit_of_measurement = SENSOR_TYPES[self.type][3]
        elif self._unit_system == 'uk':
            self._unit_of_measurement = SENSOR_TYPES[self.type][4]
        elif self._unit_system == 'uk2':
            self._unit_of_measurement = SENSOR_TYPES[self.type][5]
        # Populate the initial state immediately.
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self.client_name, self._name)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def unit_system(self):
        """Return the unit system of this entity."""
        return self._unit_system

    # pylint: disable=too-many-branches,too-many-statements
    def update(self):
        """Get the latest data from Forecast.io and updates the states."""
        import forecastio

        self.forecast_client.update()
        # Summary sensors read from their own lazily fetched data blocks.
        try:
            if self.type == 'minutely_summary':
                self.forecast_client.update_minutely()
                self._state = self.forecast_client.data_minutely.summary
                return
            elif self.type == 'hourly_summary':
                self.forecast_client.update_hourly()
                self._state = self.forecast_client.data_hourly.summary
                return
            elif self.type == 'daily_summary':
                self.forecast_client.update_daily()
                self._state = self.forecast_client.data_daily.summary
                return
        except forecastio.utils.PropertyUnavailable:
            # The API response lacked this block; keep the previous state.
            return

        # All remaining sensor types read from the 'currently' block.
        self.forecast_client.update_currently()
        data = self.forecast_client.data_currently
        try:
            if self.type == 'summary':
                self._state = data.summary
            elif self.type == 'icon':
                self._state = data.icon
            elif self.type == 'nearest_storm_distance':
                self._state = data.nearestStormDistance
            elif self.type == 'nearest_storm_bearing':
                self._state = data.nearestStormBearing
            elif self.type == 'precip_intensity':
                self._state = data.precipIntensity
            elif self.type == 'precip_type':
                self._state = data.precipType
            elif self.type == 'precip_probability':
                # API reports fractions; expose percentages.
                self._state = round(data.precipProbability * 100, 1)
            elif self.type == 'dew_point':
                self._state = round(data.dewPoint, 1)
            elif self.type == 'temperature':
                self._state = round(data.temperature, 1)
            elif self.type == 'apparent_temperature':
                self._state = round(data.apparentTemperature, 1)
            elif self.type == 'wind_speed':
                self._state = data.windSpeed
            elif self.type == 'wind_bearing':
                self._state = data.windBearing
            elif self.type == 'cloud_cover':
                self._state = round(data.cloudCover * 100, 1)
            elif self.type == 'humidity':
                self._state = round(data.humidity * 100, 1)
            elif self.type == 'pressure':
                self._state = round(data.pressure, 1)
            elif self.type == 'visibility':
                self._state = data.visibility
            elif self.type == 'ozone':
                self._state = round(data.ozone, 1)
        except forecastio.utils.PropertyUnavailable:
            # Missing property: keep the previous state.
            pass
class ForeCastData(object):
    """Gets the latest data from Forecast.io.

    All fetch methods are wrapped in Throttle so at most one API call per
    MIN_TIME_BETWEEN_UPDATES is made for each data block, however many
    sensor entities share this object.
    """

    # pylint: disable=too-many-instance-attributes
    def __init__(self, api_key, latitude, longitude, units):
        """Initialize the data object."""
        self._api_key = api_key
        self.latitude = latitude
        self.longitude = longitude
        self.units = units

        self.data = None
        self.unit_system = None
        self.data_currently = None
        self.data_minutely = None
        self.data_hourly = None
        self.data_daily = None

        # Fetch once up front so unit_system is available to sensors.
        self.update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from Forecast.io."""
        import forecastio
        self.data = forecastio.load_forecast(self._api_key,
                                             self.latitude,
                                             self.longitude,
                                             units=self.units)
        # The unit system the API actually applied (may differ from requested).
        self.unit_system = self.data.json['flags']['units']

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update_currently(self):
        """Update currently data."""
        self.data_currently = self.data.currently()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update_minutely(self):
        """Update minutely data."""
        self.data_minutely = self.data.minutely()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update_hourly(self):
        """Update hourly data."""
        self.data_hourly = self.data.hourly()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update_daily(self):
        """Update daily data."""
        self.data_daily = self.data.daily()
| mit |
toshywoshy/ansible | test/units/modules/network/f5/test_bigip_trunk.py | 22 | 4247 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_trunk import ApiParameters
from library.modules.bigip_trunk import ModuleParameters
from library.modules.bigip_trunk import ModuleManager
from library.modules.bigip_trunk import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_trunk import ApiParameters
from ansible.modules.network.f5.bigip_trunk import ModuleParameters
from ansible.modules.network.f5.bigip_trunk import ModuleManager
from ansible.modules.network.f5.bigip_trunk import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory of JSON/text fixtures, plus a memo cache keyed by absolute path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Read a fixture file, parse it as JSON when possible, and memoize it."""
    full_path = os.path.join(fixture_path, name)
    cached = fixture_data.get(full_path)
    if cached is not None:
        return cached
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text (some fixtures are plain text).
        pass
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_trunk parameter adapter classes."""

    def test_module_parameters(self):
        """Playbook-style arguments are normalized by ModuleParameters."""
        args = dict(
            name='foo',
            interfaces=[
                '1.3', '1.1'
            ],
            link_selection_policy='auto',
            frame_distribution_hash='destination-mac',
            lacp_enabled=True,
            lacp_mode='active',
            lacp_timeout='long'
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        # Interfaces come back sorted.
        assert p.interfaces == ['1.1', '1.3']
        assert p.link_selection_policy == 'auto'
        # Friendly names are mapped to BIG-IP REST API values.
        assert p.frame_distribution_hash == 'dst-mac'
        assert p.lacp_enabled is True
        assert p.lacp_mode == 'active'
        assert p.lacp_timeout == 'long'

    def test_api_parameters(self):
        """A recorded REST payload loads correctly through ApiParameters."""
        args = load_fixture('load_tm_net_trunk_1.json')

        p = ApiParameters(params=args)
        assert p.name == 'foo'
        assert p.frame_distribution_hash == 'dst-mac'
        assert p.lacp_enabled is False
        assert p.lacp_mode == 'active'
        assert p.lacp_timeout == 'long'
        assert p.interfaces == ['1.3']
        assert p.link_selection_policy == 'maximum-bandwidth'
class TestManager(unittest.TestCase):
    """Unit tests for the bigip_trunk ModuleManager create path."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        """Creating a non-existent trunk reports changed=True and echoes params."""
        set_module_args(dict(
            name='foo',
            interfaces=[
                '1.3', '1.1'
            ],
            link_selection_policy='auto',
            frame_distribution_hash='destination-mac',
            lacp_enabled=True,
            lacp_mode='active',
            lacp_timeout='long',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.create_on_device = Mock(return_value=True)
        # exists() -> False drives the create branch (no device call is made).
        mm.exists = Mock(return_value=False)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['link_selection_policy'] == 'auto'
        assert results['frame_distribution_hash'] == 'destination-mac'
        assert results['lacp_enabled'] is True
        assert results['lacp_mode'] == 'active'
        assert results['lacp_timeout'] == 'long'
| gpl-3.0 |
kjw0106/boto | tests/unit/emr/test_connection.py | 34 | 38189 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto.utils
from datetime import datetime
from time import time
from tests.unit import AWSMockServiceTestCase
from boto.emr.connection import EmrConnection
from boto.emr.emrobject import BootstrapAction, BootstrapActionList, \
ClusterStateChangeReason, ClusterStatus, ClusterSummaryList, \
ClusterSummary, ClusterTimeline, InstanceInfo, \
InstanceList, InstanceGroupInfo, \
InstanceGroup, InstanceGroupList, JobFlow, \
JobFlowStepList, Step, StepSummaryList, \
Cluster, RunJobFlowResponse
# These tests are just checking the basic structure of
# the Elastic MapReduce code, by picking a few calls
# and verifying we get the expected results with mocked
# responses. The integration tests actually verify the
# API calls interact with the service correctly.
class TestListClusters(AWSMockServiceTestCase):
    """Tests for EmrConnection.list_clusters against a canned XML response."""

    connection_class = EmrConnection

    def default_body(self):
        # Canned ListClusters response with two terminated clusters.
        return b"""
<ListClustersResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
    <ListClustersResult>
        <Clusters>
            <member>
                <Id>j-aaaaaaaaaaaa</Id>
                <Status>
                    <StateChangeReason>
                        <Message>Terminated by user request</Message>
                        <Code>USER_REQUEST</Code>
                    </StateChangeReason>
                    <State>TERMINATED</State>
                    <Timeline>
                        <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
                        <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
                        <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
                    </Timeline>
                </Status>
                <Name>analytics test</Name>
                <NormalizedInstanceHours>10</NormalizedInstanceHours>
            </member>
            <member>
                <Id>j-aaaaaaaaaaaab</Id>
                <Status>
                    <StateChangeReason>
                        <Message>Terminated by user request</Message>
                        <Code>USER_REQUEST</Code>
                    </StateChangeReason>
                    <State>TERMINATED</State>
                    <Timeline>
                        <CreationDateTime>2014-01-21T02:53:08Z</CreationDateTime>
                        <ReadyDateTime>2014-01-21T02:56:40Z</ReadyDateTime>
                        <EndDateTime>2014-01-21T03:40:22Z</EndDateTime>
                    </Timeline>
                </Status>
                <Name>test job</Name>
                <NormalizedInstanceHours>20</NormalizedInstanceHours>
            </member>
        </Clusters>
    </ListClustersResult>
    <ResponseMetadata>
        <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
    </ResponseMetadata>
</ListClustersResponse>
        """

    def test_list_clusters(self):
        """The XML body is parsed into typed ClusterSummaryList objects."""
        self.set_http_response(status_code=200)
        response = self.service_connection.list_clusters()

        self.assert_request_parameters({
            'Action': 'ListClusters',
            'Version': '2009-03-31',
        })

        self.assertTrue(isinstance(response, ClusterSummaryList))

        self.assertEqual(len(response.clusters), 2)
        self.assertTrue(isinstance(response.clusters[0], ClusterSummary))
        self.assertEqual(response.clusters[0].name, 'analytics test')
        self.assertEqual(response.clusters[0].normalizedinstancehours, '10')

        # Check the nested status/timeline/state-change-reason structures.
        self.assertTrue(isinstance(response.clusters[0].status, ClusterStatus))
        self.assertEqual(response.clusters[0].status.state, 'TERMINATED')

        self.assertTrue(isinstance(response.clusters[0].status.timeline, ClusterTimeline))

        self.assertEqual(response.clusters[0].status.timeline.creationdatetime, '2014-01-24T01:21:21Z')
        self.assertEqual(response.clusters[0].status.timeline.readydatetime, '2014-01-24T01:25:26Z')
        self.assertEqual(response.clusters[0].status.timeline.enddatetime, '2014-01-24T02:19:46Z')

        self.assertTrue(isinstance(response.clusters[0].status.statechangereason, ClusterStateChangeReason))
        self.assertEqual(response.clusters[0].status.statechangereason.code, 'USER_REQUEST')
        self.assertEqual(response.clusters[0].status.statechangereason.message, 'Terminated by user request')

    def test_list_clusters_created_before(self):
        """created_before is serialized as an ISO8601 request parameter."""
        self.set_http_response(status_code=200)

        date = datetime.now()
        # NOTE(review): only the request parameters are asserted; the response
        # object itself is intentionally unused.
        response = self.service_connection.list_clusters(created_before=date)

        self.assert_request_parameters({
            'Action': 'ListClusters',
            'CreatedBefore': date.strftime(boto.utils.ISO8601),
            'Version': '2009-03-31'
        })

    def test_list_clusters_created_after(self):
        """created_after is serialized as an ISO8601 request parameter."""
        self.set_http_response(status_code=200)

        date = datetime.now()
        response = self.service_connection.list_clusters(created_after=date)

        self.assert_request_parameters({
            'Action': 'ListClusters',
            'CreatedAfter': date.strftime(boto.utils.ISO8601),
            'Version': '2009-03-31'
        })

    def test_list_clusters_states(self):
        """cluster_states expands into numbered ClusterStates.member.N params."""
        self.set_http_response(status_code=200)

        response = self.service_connection.list_clusters(cluster_states=[
            'RUNNING',
            'WAITING'
        ])

        self.assert_request_parameters({
            'Action': 'ListClusters',
            'ClusterStates.member.1': 'RUNNING',
            'ClusterStates.member.2': 'WAITING',
            'Version': '2009-03-31'
        })
class TestListInstanceGroups(AWSMockServiceTestCase):
    """Tests for EmrConnection.list_instance_groups."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned ListInstanceGroups XML: one MASTER and one CORE group."""
        return b"""
<ListInstanceGroupsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <ListInstanceGroupsResult>
    <InstanceGroups>
      <member>
        <Id>ig-aaaaaaaaaaaaa</Id>
        <InstanceType>m1.large</InstanceType>
        <Market>ON_DEMAND</Market>
        <Status>
          <StateChangeReason>
            <Message>Job flow terminated</Message>
            <Code>CLUSTER_TERMINATED</Code>
          </StateChangeReason>
          <State>TERMINATED</State>
          <Timeline>
            <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
            <ReadyDateTime>2014-01-24T01:25:08Z</ReadyDateTime>
            <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
          </Timeline>
        </Status>
        <Name>Master instance group</Name>
        <RequestedInstanceCount>1</RequestedInstanceCount>
        <RunningInstanceCount>0</RunningInstanceCount>
        <InstanceGroupType>MASTER</InstanceGroupType>
      </member>
      <member>
        <Id>ig-aaaaaaaaaaab</Id>
        <InstanceType>m1.large</InstanceType>
        <Market>ON_DEMAND</Market>
        <Status>
          <StateChangeReason>
            <Message>Job flow terminated</Message>
            <Code>CLUSTER_TERMINATED</Code>
          </StateChangeReason>
          <State>TERMINATED</State>
          <Timeline>
            <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
            <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
            <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
          </Timeline>
        </Status>
        <Name>Core instance group</Name>
        <RequestedInstanceCount>2</RequestedInstanceCount>
        <RunningInstanceCount>0</RunningInstanceCount>
        <InstanceGroupType>CORE</InstanceGroupType>
      </member>
    </InstanceGroups>
  </ListInstanceGroupsResult>
  <ResponseMetadata>
    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
  </ResponseMetadata>
</ListInstanceGroupsResponse>
"""
    def test_list_instance_groups(self):
        """cluster_id is required; response parses into an InstanceGroupList."""
        self.set_http_response(200)
        with self.assertRaises(TypeError):
            self.service_connection.list_instance_groups()
        response = self.service_connection.list_instance_groups(cluster_id='j-123')
        self.assert_request_parameters({
            'Action': 'ListInstanceGroups',
            'ClusterId': 'j-123',
            'Version': '2009-03-31'
        })
        self.assertTrue(isinstance(response, InstanceGroupList))
        self.assertEqual(len(response.instancegroups), 2)
        self.assertTrue(isinstance(response.instancegroups[0], InstanceGroupInfo))
        self.assertEqual(response.instancegroups[0].id, 'ig-aaaaaaaaaaaaa')
        self.assertEqual(response.instancegroups[0].instancegrouptype, "MASTER")
        self.assertEqual(response.instancegroups[0].instancetype, "m1.large")
        self.assertEqual(response.instancegroups[0].market, "ON_DEMAND")
        self.assertEqual(response.instancegroups[0].name, "Master instance group")
        self.assertEqual(response.instancegroups[0].requestedinstancecount, '1')
        self.assertEqual(response.instancegroups[0].runninginstancecount, '0')
        self.assertTrue(isinstance(response.instancegroups[0].status, ClusterStatus))
        self.assertEqual(response.instancegroups[0].status.state, 'TERMINATED')
        # status.statechangereason is not parsed into an object
        #self.assertEqual(response.instancegroups[0].status.statechangereason.code, 'CLUSTER_TERMINATED')
class TestListInstances(AWSMockServiceTestCase):
    """Tests for EmrConnection.list_instances."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned ListInstances XML describing three terminated instances."""
        return b"""
<ListInstancesResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <ListInstancesResult>
    <Instances>
      <member>
        <Id>ci-123456789abc</Id>
        <Status>
          <StateChangeReason>
            <Message>Cluster was terminated.</Message>
            <Code>CLUSTER_TERMINATED</Code>
          </StateChangeReason>
          <State>TERMINATED</State>
          <Timeline>
            <CreationDateTime>2014-01-24T01:21:26Z</CreationDateTime>
            <ReadyDateTime>2014-01-24T01:25:25Z</ReadyDateTime>
            <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
          </Timeline>
        </Status>
        <PrivateDnsName>ip-10-0-0-60.us-west-1.compute.internal</PrivateDnsName>
        <PublicIpAddress>54.0.0.1</PublicIpAddress>
        <PublicDnsName>ec2-54-0-0-1.us-west-1.compute.amazonaws.com</PublicDnsName>
        <Ec2InstanceId>i-aaaaaaaa</Ec2InstanceId>
        <PrivateIpAddress>10.0.0.60</PrivateIpAddress>
      </member>
      <member>
        <Id>ci-123456789abd</Id>
        <Status>
          <StateChangeReason>
            <Message>Cluster was terminated.</Message>
            <Code>CLUSTER_TERMINATED</Code>
          </StateChangeReason>
          <State>TERMINATED</State>
          <Timeline>
            <CreationDateTime>2014-01-24T01:21:26Z</CreationDateTime>
            <ReadyDateTime>2014-01-24T01:25:25Z</ReadyDateTime>
            <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
          </Timeline>
        </Status>
        <PrivateDnsName>ip-10-0-0-61.us-west-1.compute.internal</PrivateDnsName>
        <PublicIpAddress>54.0.0.2</PublicIpAddress>
        <PublicDnsName>ec2-54-0-0-2.us-west-1.compute.amazonaws.com</PublicDnsName>
        <Ec2InstanceId>i-aaaaaaab</Ec2InstanceId>
        <PrivateIpAddress>10.0.0.61</PrivateIpAddress>
      </member>
      <member>
        <Id>ci-123456789abe3</Id>
        <Status>
          <StateChangeReason>
            <Message>Cluster was terminated.</Message>
            <Code>CLUSTER_TERMINATED</Code>
          </StateChangeReason>
          <State>TERMINATED</State>
          <Timeline>
            <CreationDateTime>2014-01-24T01:21:33Z</CreationDateTime>
            <ReadyDateTime>2014-01-24T01:25:08Z</ReadyDateTime>
            <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
          </Timeline>
        </Status>
        <PrivateDnsName>ip-10-0-0-62.us-west-1.compute.internal</PrivateDnsName>
        <PublicIpAddress>54.0.0.3</PublicIpAddress>
        <PublicDnsName>ec2-54-0-0-3.us-west-1.compute.amazonaws.com</PublicDnsName>
        <Ec2InstanceId>i-aaaaaaac</Ec2InstanceId>
        <PrivateIpAddress>10.0.0.62</PrivateIpAddress>
      </member>
    </Instances>
  </ListInstancesResult>
  <ResponseMetadata>
    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
  </ResponseMetadata>
</ListInstancesResponse>
"""
    def test_list_instances(self):
        """cluster_id is required; instances parse into an InstanceList."""
        self.set_http_response(200)
        with self.assertRaises(TypeError):
            self.service_connection.list_instances()
        response = self.service_connection.list_instances(cluster_id='j-123')
        self.assertTrue(isinstance(response, InstanceList))
        self.assertEqual(len(response.instances), 3)
        self.assertTrue(isinstance(response.instances[0], InstanceInfo))
        self.assertEqual(response.instances[0].ec2instanceid, 'i-aaaaaaaa')
        self.assertEqual(response.instances[0].id, 'ci-123456789abc')
        self.assertEqual(response.instances[0].privatednsname , 'ip-10-0-0-60.us-west-1.compute.internal')
        self.assertEqual(response.instances[0].privateipaddress , '10.0.0.60')
        self.assertEqual(response.instances[0].publicdnsname , 'ec2-54-0-0-1.us-west-1.compute.amazonaws.com')
        self.assertEqual(response.instances[0].publicipaddress , '54.0.0.1')
        self.assert_request_parameters({
            'Action': 'ListInstances',
            'ClusterId': 'j-123',
            'Version': '2009-03-31'
        })
    def test_list_instances_with_group_id(self):
        """instance_group_id is forwarded as the InstanceGroupId parameter."""
        self.set_http_response(200)
        response = self.service_connection.list_instances(
            cluster_id='j-123', instance_group_id='abc')
        self.assert_request_parameters({
            'Action': 'ListInstances',
            'ClusterId': 'j-123',
            'InstanceGroupId': 'abc',
            'Version': '2009-03-31'
        })
    def test_list_instances_with_types(self):
        """Group types serialize as numbered InstanceGroupTypes members."""
        self.set_http_response(200)
        response = self.service_connection.list_instances(
            cluster_id='j-123', instance_group_types=[
                'MASTER',
                'TASK'
            ])
        self.assert_request_parameters({
            'Action': 'ListInstances',
            'ClusterId': 'j-123',
            'InstanceGroupTypes.member.1': 'MASTER',
            'InstanceGroupTypes.member.2': 'TASK',
            'Version': '2009-03-31'
        })
class TestListSteps(AWSMockServiceTestCase):
    """Tests for EmrConnection.list_steps."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned ListSteps XML: three steps in PENDING/COMPLETED/FAILED states."""
        return b"""<ListStepsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <ListStepsResult>
    <Steps>
      <member>
        <Id>abc123</Id>
        <Status>
          <StateChangeReason/>
          <Timeline>
            <CreationDateTime>2014-07-01T00:00:00.000Z</CreationDateTime>
          </Timeline>
          <State>PENDING</State>
        </Status>
        <Name>Step 1</Name>
        <Config>
          <Jar>/home/hadoop/lib/emr-s3distcp-1.0.jar</Jar>
          <Args>
            <member>--src</member>
            <member>hdfs:///data/test/</member>
            <member>--dest</member>
            <member>s3n://test/data</member>
          </Args>
          <Properties/>
        </Config>
        <ActionOnFailure>CONTINUE</ActionOnFailure>
      </member>
      <member>
        <Id>def456</Id>
        <Status>
          <StateChangeReason/>
          <Timeline>
            <CreationDateTime>2014-07-01T00:00:00.000Z</CreationDateTime>
          </Timeline>
          <State>COMPLETED</State>
        </Status>
        <Name>Step 2</Name>
        <Config>
          <MainClass>my.main.SomeClass</MainClass>
          <Jar>s3n://test/jars/foo.jar</Jar>
        </Config>
        <ActionOnFailure>CONTINUE</ActionOnFailure>
      </member>
      <member>
        <Id>ghi789</Id>
        <Status>
          <StateChangeReason/>
          <Timeline>
            <CreationDateTime>2014-07-01T00:00:00.000Z</CreationDateTime>
          </Timeline>
          <State>FAILED</State>
        </Status>
        <Name>Step 3</Name>
        <Config>
          <Jar>s3n://test/jars/bar.jar</Jar>
          <Args>
            <member>-arg</member>
            <member>value</member>
          </Args>
          <Properties/>
        </Config>
        <ActionOnFailure>TERMINATE_CLUSTER</ActionOnFailure>
      </member>
    </Steps>
  </ListStepsResult>
  <ResponseMetadata>
    <RequestId>eff31ee5-0342-11e4-b3c7-9de5a93f6fcb</RequestId>
  </ResponseMetadata>
</ListStepsResponse>
"""
    def test_list_steps(self):
        """cluster_id is required; steps parse with valid states and config."""
        self.set_http_response(200)
        with self.assertRaises(TypeError):
            self.service_connection.list_steps()
        response = self.service_connection.list_steps(cluster_id='j-123')
        self.assert_request_parameters({
            'Action': 'ListSteps',
            'ClusterId': 'j-123',
            'Version': '2009-03-31'
        })
        self.assertTrue(isinstance(response, StepSummaryList))
        self.assertEqual(response.steps[0].name, 'Step 1')
        valid_states = [
            'PENDING',
            'RUNNING',
            'COMPLETED',
            'CANCELLED',
            'FAILED',
            'INTERRUPTED'
        ]
        # Check for step states
        for step in response.steps:
            self.assertIn(step.status.state, valid_states)
        # Check for step config
        step = response.steps[0]
        self.assertEqual(step.config.jar,
                         '/home/hadoop/lib/emr-s3distcp-1.0.jar')
        self.assertEqual(len(step.config.args), 4)
        self.assertEqual(step.config.args[0].value, '--src')
        self.assertEqual(step.config.args[1].value, 'hdfs:///data/test/')
        step = response.steps[1]
        self.assertEqual(step.config.mainclass, 'my.main.SomeClass')
    def test_list_steps_with_states(self):
        """State filters serialize as numbered StepStateList members."""
        self.set_http_response(200)
        response = self.service_connection.list_steps(
            cluster_id='j-123', step_states=[
                'COMPLETED',
                'FAILED'
            ])
        self.assert_request_parameters({
            'Action': 'ListSteps',
            'ClusterId': 'j-123',
            'StepStateList.member.1': 'COMPLETED',
            'StepStateList.member.2': 'FAILED',
            'Version': '2009-03-31'
        })
        self.assertTrue(isinstance(response, StepSummaryList))
        self.assertEqual(response.steps[0].name, 'Step 1')
class TestListBootstrapActions(AWSMockServiceTestCase):
    """Tests for EmrConnection.list_bootstrap_actions."""

    connection_class = EmrConnection

    def default_body(self):
        """Minimal canned response; only request serialization is checked."""
        return b"""<ListBootstrapActionsOutput></ListBootstrapActionsOutput>"""

    def test_list_bootstrap_actions(self):
        """cluster_id is mandatory and is sent as ClusterId."""
        self.set_http_response(200)
        # Omitting the required cluster id must raise.
        with self.assertRaises(TypeError):
            self.service_connection.list_bootstrap_actions()
        self.service_connection.list_bootstrap_actions(cluster_id='j-123')
        self.assert_request_parameters({
            'Action': 'ListBootstrapActions',
            'ClusterId': 'j-123',
            'Version': '2009-03-31'
        })
class TestDescribeCluster(AWSMockServiceTestCase):
    """Tests for EmrConnection.describe_cluster."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned DescribeCluster XML for a terminated test cluster."""
        return b"""
<DescribeClusterResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <DescribeClusterResult>
    <Cluster>
      <Id>j-aaaaaaaaa</Id>
      <Tags/>
      <Ec2InstanceAttributes>
        <Ec2AvailabilityZone>us-west-1c</Ec2AvailabilityZone>
        <Ec2KeyName>my_secret_key</Ec2KeyName>
      </Ec2InstanceAttributes>
      <RunningAmiVersion>2.4.2</RunningAmiVersion>
      <VisibleToAllUsers>true</VisibleToAllUsers>
      <Status>
        <StateChangeReason>
          <Message>Terminated by user request</Message>
          <Code>USER_REQUEST</Code>
        </StateChangeReason>
        <State>TERMINATED</State>
        <Timeline>
          <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
          <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
          <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
        </Timeline>
      </Status>
      <AutoTerminate>false</AutoTerminate>
      <Name>test analytics</Name>
      <RequestedAmiVersion>2.4.2</RequestedAmiVersion>
      <Applications>
        <member>
          <Name>hadoop</Name>
          <Version>1.0.3</Version>
        </member>
      </Applications>
      <TerminationProtected>false</TerminationProtected>
      <MasterPublicDnsName>ec2-184-0-0-1.us-west-1.compute.amazonaws.com</MasterPublicDnsName>
      <NormalizedInstanceHours>10</NormalizedInstanceHours>
      <ServiceRole>my-service-role</ServiceRole>
    </Cluster>
  </DescribeClusterResult>
  <ResponseMetadata>
    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
  </ResponseMetadata>
</DescribeClusterResponse>
"""
    def test_describe_cluster(self):
        """cluster_id is required; response parses into a Cluster object."""
        self.set_http_response(200)
        with self.assertRaises(TypeError):
            self.service_connection.describe_cluster()
        response = self.service_connection.describe_cluster(cluster_id='j-123')
        self.assertTrue(isinstance(response, Cluster))
        self.assertEqual(response.id, 'j-aaaaaaaaa')
        self.assertEqual(response.runningamiversion, '2.4.2')
        self.assertEqual(response.visibletoallusers, 'true')
        self.assertEqual(response.autoterminate, 'false')
        self.assertEqual(response.name, 'test analytics')
        self.assertEqual(response.requestedamiversion, '2.4.2')
        self.assertEqual(response.terminationprotected, 'false')
        self.assertEqual(response.ec2instanceattributes.ec2availabilityzone, "us-west-1c")
        self.assertEqual(response.ec2instanceattributes.ec2keyname, 'my_secret_key')
        self.assertEqual(response.status.state, 'TERMINATED')
        self.assertEqual(response.applications[0].name, 'hadoop')
        self.assertEqual(response.applications[0].version, '1.0.3')
        self.assertEqual(response.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
        self.assertEqual(response.normalizedinstancehours, '10')
        self.assertEqual(response.servicerole, 'my-service-role')
        self.assert_request_parameters({
            'Action': 'DescribeCluster',
            'ClusterId': 'j-123',
            'Version': '2009-03-31'
        })
class TestDescribeStep(AWSMockServiceTestCase):
    """Tests for EmrConnection.describe_step."""

    connection_class = EmrConnection

    def default_body(self):
        """Minimal canned response; only request serialization is checked."""
        return b"""<DescribeStepOutput></DescribeStepOutput>"""

    def test_describe_step(self):
        """Both cluster_id and step_id are required keyword arguments."""
        self.set_http_response(200)
        # Any call missing one of the two required ids must raise.
        for bad_kwargs in ({}, {'cluster_id': 'j-123'}, {'step_id': 'abc'}):
            with self.assertRaises(TypeError):
                self.service_connection.describe_step(**bad_kwargs)
        self.service_connection.describe_step(
            cluster_id='j-123', step_id='abc')
        self.assert_request_parameters({
            'Action': 'DescribeStep',
            'ClusterId': 'j-123',
            'StepId': 'abc',
            'Version': '2009-03-31'
        })
class TestAddJobFlowSteps(AWSMockServiceTestCase):
    """Tests for EmrConnection.add_jobflow_steps."""

    connection_class = EmrConnection

    def default_body(self):
        """Canned AddJobFlowSteps XML returning two step ids."""
        return b"""
<AddJobFlowStepsOutput>
     <StepIds>
          <member>Foo</member>
          <member>Bar</member>
     </StepIds>
</AddJobFlowStepsOutput>
"""

    def test_add_jobflow_steps(self):
        self.set_http_response(200)
        step_list = self.service_connection.add_jobflow_steps(
            jobflow_id='j-123', steps=[])
        # Make sure the correct object is returned, as this was
        # previously set to incorrectly return an empty instance
        # of RunJobFlowResponse.
        self.assertTrue(isinstance(step_list, JobFlowStepList))
        self.assertEqual(step_list.stepids[0].value, 'Foo')
        self.assertEqual(step_list.stepids[1].value, 'Bar')
class TestBuildTagList(AWSMockServiceTestCase):
    """Tests for EmrConnection._build_tag_list parameter encoding."""

    connection_class = EmrConnection

    def test_key_without_value_encoding(self):
        """Keys with empty/None values emit only a Key entry, sorted by key."""
        tags = {
            'KeyWithNoValue': '',
            'AnotherKeyWithNoValue': None
        }
        encoded = self.service_connection._build_tag_list(tags)
        self.assertEqual({
            'Tags.member.1.Key': 'AnotherKeyWithNoValue',
            'Tags.member.2.Key': 'KeyWithNoValue'
        }, encoded)

    def test_key_full_key_value_encoding(self):
        """Key/value pairs emit both Key and Value entries, sorted by key."""
        tags = {
            'FirstKey': 'One',
            'SecondKey': 'Two'
        }
        encoded = self.service_connection._build_tag_list(tags)
        self.assertEqual({
            'Tags.member.1.Key': 'FirstKey',
            'Tags.member.1.Value': 'One',
            'Tags.member.2.Key': 'SecondKey',
            'Tags.member.2.Value': 'Two'
        }, encoded)
class TestAddTag(AWSMockServiceTestCase):
    """Tests for EmrConnection.add_tags."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned AddTags XML acknowledging the request."""
        return b"""<AddTagsResponse
               xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
                  <AddTagsResult/>
                  <ResponseMetadata>
                     <RequestId>88888888-8888-8888-8888-888888888888</RequestId>
                  </ResponseMetadata>
               </AddTagsResponse>
            """
    def test_add_mix_of_tags_with_without_values(self):
        """Tags sort by key; empty-valued tags emit a Key with no Value."""
        input_tags = {
            'FirstKey': 'One',
            'SecondKey': 'Two',
            'ZzzNoValue': ''
        }
        self.set_http_response(200)
        with self.assertRaises(TypeError):
            self.service_connection.add_tags()
        with self.assertRaises(TypeError):
            self.service_connection.add_tags('j-123')
        with self.assertRaises(AssertionError):
            self.service_connection.add_tags('j-123', [])
        response = self.service_connection.add_tags('j-123', input_tags)
        self.assertTrue(response)
        self.assert_request_parameters({
            'Action': 'AddTags',
            'ResourceId': 'j-123',
            'Tags.member.1.Key': 'FirstKey',
            'Tags.member.1.Value': 'One',
            'Tags.member.2.Key': 'SecondKey',
            'Tags.member.2.Value': 'Two',
            'Tags.member.3.Key': 'ZzzNoValue',
            'Version': '2009-03-31'
        })
class TestRemoveTag(AWSMockServiceTestCase):
    """Tests for EmrConnection.remove_tags."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned RemoveTags XML acknowledging the request."""
        return b"""<RemoveTagsResponse
               xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
                  <RemoveTagsResult/>
                  <ResponseMetadata>
                     <RequestId>88888888-8888-8888-8888-888888888888</RequestId>
                  </ResponseMetadata>
               </RemoveTagsResponse>
            """
    def test_remove_tags(self):
        """Tag keys serialize as numbered TagKeys members.

        Bug fix: the error-path checks previously called add_tags (a
        copy-paste from TestAddTag), so remove_tags' own argument
        validation was never exercised. The unused input_tags dict from
        the same copy-paste is gone, and the add_tags-specific
        AssertionError check (empty tag *dict*) is dropped because it is
        already covered by TestAddTag.
        """
        self.set_http_response(200)
        # remove_tags requires both a resource id and a list of tag keys.
        with self.assertRaises(TypeError):
            self.service_connection.remove_tags()
        with self.assertRaises(TypeError):
            self.service_connection.remove_tags('j-123')
        response = self.service_connection.remove_tags('j-123', ['FirstKey', 'SecondKey'])
        self.assertTrue(response)
        self.assert_request_parameters({
            'Action': 'RemoveTags',
            'ResourceId': 'j-123',
            'TagKeys.member.1': 'FirstKey',
            'TagKeys.member.2': 'SecondKey',
            'Version': '2009-03-31'
        })
class DescribeJobFlowsTestBase(AWSMockServiceTestCase):
    """Shared fixture for the DescribeJobFlows tests below."""
    connection_class = EmrConnection
    def default_body(self):
        """Canned DescribeJobFlows XML: one terminated flow, one step, two instance groups."""
        return b"""
<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <DescribeJobFlowsResult>
    <JobFlows>
      <member>
        <AmiVersion>2.4.2</AmiVersion>
        <ExecutionStatusDetail>
          <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
          <LastStateChangeReason>Terminated by user request</LastStateChangeReason>
          <StartDateTime>2014-01-24T01:25:26Z</StartDateTime>
          <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
          <State>TERMINATED</State>
          <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
        </ExecutionStatusDetail>
        <BootstrapActions/>
        <VisibleToAllUsers>true</VisibleToAllUsers>
        <SupportedProducts/>
        <Name>test analytics</Name>
        <JobFlowId>j-aaaaaa</JobFlowId>
        <Steps>
          <member>
            <ExecutionStatusDetail>
              <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
              <StartDateTime>2014-01-24T01:25:26Z</StartDateTime>
              <State>COMPLETED</State>
              <EndDateTime>2014-01-24T01:26:08Z</EndDateTime>
            </ExecutionStatusDetail>
            <StepConfig>
              <HadoopJarStep>
                <Args>
                  <member>s3://us-west-1.elasticmapreduce/libs/hive/hive-script</member>
                  <member>--base-path</member>
                  <member>s3://us-west-1.elasticmapreduce/libs/hive/</member>
                  <member>--install-hive</member>
                  <member>--hive-versions</member>
                  <member>0.11.0.1</member>
                </Args>
                <Jar>s3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar>
                <Properties/>
              </HadoopJarStep>
              <Name>Setup hive</Name>
              <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
            </StepConfig>
          </member>
        </Steps>
        <Instances>
          <Placement>
            <AvailabilityZone>us-west-1c</AvailabilityZone>
          </Placement>
          <MasterInstanceType>m1.large</MasterInstanceType>
          <Ec2KeyName>my_key</Ec2KeyName>
          <KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps>
          <InstanceGroups>
            <member>
              <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
              <InstanceRunningCount>0</InstanceRunningCount>
              <StartDateTime>2014-01-24T01:23:56Z</StartDateTime>
              <ReadyDateTime>2014-01-24T01:25:08Z</ReadyDateTime>
              <State>ENDED</State>
              <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
              <InstanceRequestCount>1</InstanceRequestCount>
              <InstanceType>m1.large</InstanceType>
              <LastStateChangeReason>Job flow terminated</LastStateChangeReason>
              <Market>ON_DEMAND</Market>
              <InstanceGroupId>ig-aaaaaa</InstanceGroupId>
              <InstanceRole>MASTER</InstanceRole>
              <Name>Master instance group</Name>
            </member>
            <member>
              <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
              <InstanceRunningCount>0</InstanceRunningCount>
              <StartDateTime>2014-01-24T01:25:26Z</StartDateTime>
              <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
              <State>ENDED</State>
              <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
              <InstanceRequestCount>2</InstanceRequestCount>
              <InstanceType>m1.large</InstanceType>
              <LastStateChangeReason>Job flow terminated</LastStateChangeReason>
              <Market>ON_DEMAND</Market>
              <InstanceGroupId>ig-aaaaab</InstanceGroupId>
              <InstanceRole>CORE</InstanceRole>
              <Name>Core instance group</Name>
            </member>
          </InstanceGroups>
          <SlaveInstanceType>m1.large</SlaveInstanceType>
          <MasterInstanceId>i-aaaaaa</MasterInstanceId>
          <HadoopVersion>1.0.3</HadoopVersion>
          <NormalizedInstanceHours>12</NormalizedInstanceHours>
          <MasterPublicDnsName>ec2-184-0-0-1.us-west-1.compute.amazonaws.com</MasterPublicDnsName>
          <InstanceCount>3</InstanceCount>
          <TerminationProtected>false</TerminationProtected>
        </Instances>
      </member>
    </JobFlows>
  </DescribeJobFlowsResult>
  <ResponseMetadata>
    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
  </ResponseMetadata>
</DescribeJobFlowsResponse>
"""
class TestDescribeJobFlows(DescribeJobFlowsTestBase):
    """Tests for EmrConnection.describe_jobflows."""
    def test_describe_jobflows_response(self):
        """The canned job flow parses into a JobFlow with steps and groups."""
        self.set_http_response(200)
        response = self.service_connection.describe_jobflows()
        self.assertTrue(isinstance(response, list))
        jf = response[0]
        self.assertTrue(isinstance(jf, JobFlow))
        self.assertEqual(jf.amiversion, '2.4.2')
        self.assertEqual(jf.visibletoallusers, 'true')
        self.assertEqual(jf.name, 'test analytics')
        self.assertEqual(jf.jobflowid, 'j-aaaaaa')
        self.assertEqual(jf.ec2keyname, 'my_key')
        self.assertEqual(jf.masterinstancetype, 'm1.large')
        self.assertEqual(jf.availabilityzone, 'us-west-1c')
        self.assertEqual(jf.keepjobflowalivewhennosteps, 'true')
        self.assertEqual(jf.slaveinstancetype, 'm1.large')
        self.assertEqual(jf.masterinstanceid, 'i-aaaaaa')
        self.assertEqual(jf.hadoopversion, '1.0.3')
        self.assertEqual(jf.normalizedinstancehours, '12')
        self.assertEqual(jf.masterpublicdnsname, 'ec2-184-0-0-1.us-west-1.compute.amazonaws.com')
        self.assertEqual(jf.instancecount, '3')
        self.assertEqual(jf.terminationprotected, 'false')
        self.assertTrue(isinstance(jf.steps, list))
        step = jf.steps[0]
        self.assertTrue(isinstance(step, Step))
        self.assertEqual(step.jar, 's3://us-west-1.elasticmapreduce/libs/script-runner/script-runner.jar')
        self.assertEqual(step.name, 'Setup hive')
        self.assertEqual(step.actiononfailure, 'TERMINATE_JOB_FLOW')
        self.assertTrue(isinstance(jf.instancegroups, list))
        ig = jf.instancegroups[0]
        self.assertTrue(isinstance(ig, InstanceGroup))
        self.assertEqual(ig.creationdatetime, '2014-01-24T01:21:21Z')
        self.assertEqual(ig.state, 'ENDED')
        self.assertEqual(ig.instancerequestcount, '1')
        self.assertEqual(ig.instancetype, 'm1.large')
        self.assertEqual(ig.laststatechangereason, 'Job flow terminated')
        self.assertEqual(ig.market, 'ON_DEMAND')
        self.assertEqual(ig.instancegroupid, 'ig-aaaaaa')
        self.assertEqual(ig.instancerole, 'MASTER')
        self.assertEqual(ig.name, 'Master instance group')
    def test_describe_jobflows_no_args(self):
        """Without filters only the Action parameter is sent."""
        self.set_http_response(200)
        self.service_connection.describe_jobflows()
        self.assert_request_parameters({
            'Action': 'DescribeJobFlows',
        }, ignore_params_values=['Version'])
    def test_describe_jobflows_filtered(self):
        """State, id and date filters serialize into request parameters."""
        self.set_http_response(200)
        now = datetime.now()
        a_bit_before = datetime.fromtimestamp(time() - 1000)
        self.service_connection.describe_jobflows(states=['WAITING', 'RUNNING'], jobflow_ids=['j-aaaaaa', 'j-aaaaab'], created_after=a_bit_before, created_before=now)
        self.assert_request_parameters({
            'Action': 'DescribeJobFlows',
            'JobFlowIds.member.1': 'j-aaaaaa',
            'JobFlowIds.member.2': 'j-aaaaab',
            'JobFlowStates.member.1': 'WAITING',
            'JobFlowStates.member.2': 'RUNNING',
            'CreatedAfter': a_bit_before.strftime(boto.utils.ISO8601),
            'CreatedBefore': now.strftime(boto.utils.ISO8601),
        }, ignore_params_values=['Version'])
class TestDescribeJobFlow(DescribeJobFlowsTestBase):
    """Tests for the single-jobflow wrapper EmrConnection.describe_jobflow."""

    def test_describe_jobflow(self):
        self.set_http_response(200)
        jobflow = self.service_connection.describe_jobflow('j-aaaaaa')
        self.assertTrue(isinstance(jobflow, JobFlow))
        # The wrapper delegates to DescribeJobFlows with a single id.
        self.assert_request_parameters({
            'Action': 'DescribeJobFlows',
            'JobFlowIds.member.1': 'j-aaaaaa',
        }, ignore_params_values=['Version'])
class TestRunJobFlow(AWSMockServiceTestCase):
    """Tests for EmrConnection.run_jobflow."""

    connection_class = EmrConnection

    def default_body(self):
        """Canned RunJobFlow XML returning a new job flow id."""
        return b"""
<RunJobFlowResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
  <RunJobFlowResult>
    <JobFlowId>j-ZKIY4CKQRX72</JobFlowId>
  </RunJobFlowResult>
  <ResponseMetadata>
    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
  </ResponseMetadata>
</RunJobFlowResponse>
"""

    def test_run_jobflow_service_role(self):
        """service_role is forwarded as the ServiceRole parameter."""
        self.set_http_response(200)
        jobflow_id = self.service_connection.run_jobflow(
            'EmrCluster', service_role='EMR_DefaultRole')
        self.assertTrue(jobflow_id)
        self.assert_request_parameters({
            'Action': 'RunJobFlow',
            'Version': '2009-03-31',
            'ServiceRole': 'EMR_DefaultRole',
            'Name': 'EmrCluster' },
            ignore_params_values=['ActionOnFailure', 'Instances.InstanceCount',
                                  'Instances.KeepJobFlowAliveWhenNoSteps',
                                  'Instances.MasterInstanceType',
                                  'Instances.SlaveInstanceType'])
| mit |
rvalyi/geraldo | site/newsite/site-geraldo/django/template/defaultfilters.py | 12 | 26148 | """Default variable filters."""
import re
import random as random_module
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
from django.template import Variable, Library
from django.conf import settings
from django.utils.translation import ugettext, ungettext
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.safestring import mark_safe, SafeData
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
    """
    Decorator for filters which should only receive unicode objects. The object
    passed as the first positional argument will be converted to a unicode
    object.
    """
    def wrapper(*args, **kwargs):
        if not args:
            return func(*args, **kwargs)
        args = list(args)
        first = force_unicode(args[0])
        args[0] = first
        # force_unicode() drops the "safe" marking, so restore it on the
        # result -- but only for filters that declare themselves safe.
        if isinstance(first, SafeData) and getattr(func, 'is_safe', False):
            return mark_safe(func(*args, **kwargs))
        return func(*args, **kwargs)
    # Keep a reference to the undecorated function; the template parser
    # inspects it to validate the original filter arguments.
    wrapper._decorated_function = getattr(func, '_decorated_function', func)
    for attr in ('is_safe', 'needs_autoescape'):
        if hasattr(func, attr):
            setattr(wrapper, attr, getattr(func, attr))
    return wraps(func)(wrapper)
###################
# STRINGS #
###################
def addslashes(value):
    """
    Adds slashes before quotes. Useful for escaping strings in CSV, for
    example. Less useful for escaping JavaScript; use the ``escapejs``
    filter instead.
    """
    # Backslash must be escaped first so already-added slashes survive.
    for char in ('\\', '"', "'"):
        value = value.replace(char, '\\' + char)
    return value
addslashes.is_safe = True
addslashes = stringfilter(addslashes)
def capfirst(value):
    """Capitalizes the first character of the value."""
    if not value:
        # Empty input passes through unchanged.
        return value
    return value[0].upper() + value[1:]
capfirst.is_safe = True
capfirst = stringfilter(capfirst)
# Characters that always need escaping inside JavaScript string literals.
_base_js_escapes = (
    ('\\', r'\x5C'),
    ('\'', r'\x27'),
    ('"', r'\x22'),
    ('>', r'\x3E'),
    ('<', r'\x3C'),
    ('&', r'\x26'),
    ('=', r'\x3D'),
    ('-', r'\x2D'),
    (';', r'\x3B')
)

# Escape every ASCII character with a value less than 32.
_js_escapes = _base_js_escapes + tuple(
    ('%c' % code, '\\x%02X' % code) for code in range(32)
)
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    for unsafe, replacement in _js_escapes:
        value = value.replace(unsafe, replacement)
    return value
escapejs = stringfilter(escapejs)
def fix_ampersands(value):
    """Replaces ampersands with ``&amp;`` entities."""
    # Delegate to the shared HTML helper; imported lazily like the other
    # filters in this module that need django.utils.html.
    from django.utils.html import fix_ampersands as _fix_ampersands
    return _fix_ampersands(value)
fix_ampersands.is_safe = True
fix_ampersands = stringfilter(fix_ampersands)
def floatformat(text, arg=-1):
    """
    Displays a float to a specified number of decimal places.
    If called without an argument, it displays the floating point number with
    one decimal place -- but only if there's a decimal place to be displayed:
    * num1 = 34.23234
    * num2 = 34.00000
    * num3 = 34.26000
    * {{ num1|floatformat }} displays "34.2"
    * {{ num2|floatformat }} displays "34"
    * {{ num3|floatformat }} displays "34.3"
    If arg is positive, it will always display exactly arg number of decimal
    places:
    * {{ num1|floatformat:3 }} displays "34.232"
    * {{ num2|floatformat:3 }} displays "34.000"
    * {{ num3|floatformat:3 }} displays "34.260"
    If arg is negative, it will display arg number of decimal places -- but
    only if there are places to be displayed:
    * {{ num1|floatformat:"-3" }} displays "34.232"
    * {{ num2|floatformat:"-3" }} displays "34"
    * {{ num3|floatformat:"-3" }} displays "34.260"
    """
    try:
        f = float(text)
    except (ValueError, TypeError):
        # Non-numeric input: fail silently with an empty string.
        return u''
    try:
        d = int(arg)
    except ValueError:
        # Unparseable precision argument: show the number unformatted.
        return force_unicode(f)
    try:
        # Fractional part; int(f) raises OverflowError for inf/-inf.
        m = f - int(f)
    except OverflowError:
        return force_unicode(f)
    if not m and d < 0:
        # Negative precision with no fractional part: render as an integer.
        return mark_safe(u'%d' % int(f))
    else:
        # abs(d) decimal places, always shown.
        formatstr = u'%%.%df' % abs(d)
        return mark_safe(formatstr % f)
floatformat.is_safe = True
def iriencode(value):
    """Escapes an IRI value for use in a URL."""
    encoded = iri_to_uri(value)
    return force_unicode(encoded)
iriencode.is_safe = True
iriencode = stringfilter(iriencode)
def linenumbers(value, autoescape=None):
    """Displays text with line numbers."""
    from django.utils.html import escape
    lines = value.split(u'\n')
    # Zero-pad every line number to the width of the largest one.
    width = unicode(len(unicode(len(lines))))
    template = u"%0" + width + u"d. %s"
    # Already-safe input (or disabled autoescaping) is used verbatim.
    escape_needed = autoescape and not isinstance(value, SafeData)
    numbered = []
    for index, line in enumerate(lines):
        if escape_needed:
            line = escape(line)
        numbered.append(template % (index + 1, line))
    return mark_safe(u'\n'.join(numbered))
linenumbers.is_safe = True
linenumbers.needs_autoescape = True
linenumbers = stringfilter(linenumbers)
def lower(value):
    """Converts a string into all lowercase."""
    return value.lower()
lower.is_safe = True
lower = stringfilter(lower)
def make_list(value):
    """
    Returns the value turned into a list.

    For an integer, it's a list of digits (the value arrives here already
    coerced to a string by the stringfilter decorator).
    For a string, it's a list of characters.
    """
    return list(value)
make_list.is_safe = False
make_list = stringfilter(make_list)
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    """
    import unicodedata
    # Decompose accented characters and drop anything outside ASCII.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # Strip characters that are not alphanumeric, whitespace or hyphen.
    cleaned = re.sub('[^\w\s-]', '', value)
    value = unicode(cleaned.strip().lower())
    # Collapse runs of whitespace/hyphens into a single hyphen.
    return mark_safe(re.sub('[-\s]+', '-', value))
slugify.is_safe = True
slugify = stringfilter(slugify)
def stringformat(value, arg):
    """
    Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formating syntax, with the exception that
    the leading "%" is dropped.
    See http://docs.python.org/lib/typesseq-strings.html for documentation
    of Python string formatting
    """
    try:
        format_spec = u"%" + unicode(arg)
        return format_spec % value
    except (ValueError, TypeError):
        # Bad specifier or incompatible value: fail silently.
        return u""
stringformat.is_safe = True
def title(value):
    """Converts a string into titlecase."""
    # str.title() upper-cases the letter after an apostrophe ("isn'T");
    # undo that for letters following a lowercase-letter-apostrophe pair.
    return re.sub("([a-z])'([A-Z])",
                  lambda match: match.group(0).lower(),
                  value.title())
title.is_safe = True
title = stringfilter(title)
def truncatewords(value, arg):
    """
    Truncates a string after a certain number of words.
    Argument: Number of words to truncate after.
    """
    from django.utils.text import truncate_words
    try:
        length = int(arg)
    except ValueError:
        # Invalid literal for int(): fail silently, return value unchanged.
        return value
    return truncate_words(value, length)
truncatewords.is_safe = True
truncatewords = stringfilter(truncatewords)
def truncatewords_html(value, arg):
    """
    Truncates HTML after ``arg`` words, keeping tags balanced.

    Fails silently (returns the value unchanged) when the argument is not
    an integer.
    """
    from django.utils.text import truncate_html_words
    try:
        word_limit = int(arg)
    except ValueError:  # invalid literal for int()
        return value  # Fail silently.
    return truncate_html_words(value, word_limit)
truncatewords_html.is_safe = True
truncatewords_html = stringfilter(truncatewords_html)
def upper(value):
    """Converts a string into all uppercase."""
    return value.upper()
# Not safe: presumably because uppercasing can mangle already-escaped
# content (e.g. HTML entities like '&amp;' -> '&AMP;') — matches upstream.
upper.is_safe = False
upper = stringfilter(upper)
def urlencode(value):
    """Escapes the value for safe use inside a URL."""
    from django.utils.http import urlquote
    return urlquote(value)
urlencode.is_safe = False
urlencode = stringfilter(urlencode)
def urlize(value, autoescape=None):
    """Converts URLs in plain text into clickable rel=nofollow links."""
    # Alias the import so it does not shadow this filter's own name.
    from django.utils.html import urlize as urlize_impl
    return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape))
urlize.is_safe = True
urlize.needs_autoescape = True
urlize = stringfilter(urlize)
def urlizetrunc(value, limit, autoescape=None):
    """
    Converts URLs into clickable rel=nofollow links, truncating the
    displayed URL text to ``limit`` characters.
    """
    from django.utils.html import urlize as urlize_impl
    linked = urlize_impl(value, trim_url_limit=int(limit), nofollow=True,
                         autoescape=autoescape)
    return mark_safe(linked)
urlizetrunc.is_safe = True
urlizetrunc.needs_autoescape = True
urlizetrunc = stringfilter(urlizetrunc)
def wordcount(value):
    """Returns the number of whitespace-separated words in the string."""
    return len(value.split())
wordcount.is_safe = False
wordcount = stringfilter(wordcount)
def wordwrap(value, arg):
    """Wraps words at ``arg`` characters per line."""
    from django.utils.text import wrap
    return wrap(value, int(arg))
wordwrap.is_safe = True
wordwrap = stringfilter(wordwrap)
def ljust(value, arg):
    """Left-aligns the value in a field ``arg`` characters wide."""
    return value.ljust(int(arg))
ljust.is_safe = True
ljust = stringfilter(ljust)
def rjust(value, arg):
    """Right-aligns the value in a field ``arg`` characters wide."""
    return value.rjust(int(arg))
rjust.is_safe = True
rjust = stringfilter(rjust)
def center(value, arg):
    """Centers the value in a field ``arg`` characters wide."""
    return value.center(int(arg))
center.is_safe = True
center = stringfilter(center)
def cut(value, arg):
    """
    Removes all occurrences of ``arg`` from the given string.
    """
    safe = isinstance(value, SafeData)
    value = value.replace(arg, u'')
    # Cutting ';' can truncate HTML entities (e.g. '&amp;' -> '&amp'), so a
    # previously-safe string must NOT keep its safe marking in that case.
    if safe and arg != ';':
        return mark_safe(value)
    return value
cut = stringfilter(cut)
###################
# HTML STRINGS #
###################
def escape(value):
    """
    Marks the value as a string whose HTML should be escaped when it is
    output (lazy escaping via ``mark_for_escaping``).
    """
    from django.utils.safestring import mark_for_escaping
    return mark_for_escaping(value)
# Safe in the sense that the result will be escaped exactly once on output.
escape.is_safe = True
escape = stringfilter(escape)
def force_escape(value):
    """
    Escapes a string's HTML immediately and returns a new safe string with
    the escaped characters (unlike ``escape``, which only marks the value
    for later escaping).
    """
    from django.utils.html import escape as escape_impl
    return mark_safe(escape_impl(value))
force_escape = stringfilter(force_escape)
force_escape.is_safe = True
def linebreaks(value, autoescape=None):
    """
    Replaces line breaks in plain text with appropriate HTML: a single
    newline becomes ``<br />`` and a blank-line-separated chunk becomes a
    paragraph (``</p>``).
    """
    from django.utils.html import linebreaks as linebreaks_impl
    # Already-safe input must not be escaped a second time.
    do_escape = autoescape and not isinstance(value, SafeData)
    return mark_safe(linebreaks_impl(value, do_escape))
linebreaks.is_safe = True
linebreaks.needs_autoescape = True
linebreaks = stringfilter(linebreaks)
def linebreaksbr(value, autoescape=None):
    """Converts every newline in plain text into an HTML ``<br />``."""
    # Escape first (unless the input is already marked safe), so the
    # inserted <br /> tags survive.
    if autoescape and not isinstance(value, SafeData):
        from django.utils.html import escape
        value = escape(value)
    return mark_safe(value.replace('\n', '<br />'))
linebreaksbr.is_safe = True
linebreaksbr.needs_autoescape = True
linebreaksbr = stringfilter(linebreaksbr)
def safe(value):
    """
    Marks the value as a string that should not be auto-escaped.
    """
    from django.utils.safestring import mark_safe
    return mark_safe(value)
safe.is_safe = True
safe = stringfilter(safe)
def removetags(value, tags):
    """Removes a space separated list of [X]HTML tags from the output."""
    # Build an alternation of the (regex-escaped) tag names, e.g. u'(b|img)'.
    tags = [re.escape(tag) for tag in tags.split()]
    tags_re = u'(%s)' % u'|'.join(tags)
    # Opening or self-closing tags, with optional attributes.
    starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
    endtag_re = re.compile(u'</%s>' % tags_re)
    value = starttag_re.sub(u'', value)
    value = endtag_re.sub(u'', value)
    return value
removetags.is_safe = True
removetags = stringfilter(removetags)
def striptags(value):
    """Strips all [X]HTML tags from the string."""
    from django.utils.html import strip_tags
    return strip_tags(value)
striptags.is_safe = True
striptags = stringfilter(striptags)
###################
# LISTS #
###################
def dictsort(value, arg):
    """
    Takes a list of dicts, returns that list sorted by the property given
    in the argument.
    """
    # Sort on the resolved key only. The previous decorate-sort-undecorate
    # also compared the items themselves on key ties, which can blow up for
    # uncomparable items; sorted(key=...) is stable and matches the later
    # upstream Django implementation.
    return sorted(value, key=Variable(arg).resolve)
dictsort.is_safe = False
def dictsortreversed(value, arg):
    """
    Takes a list of dicts, returns that list sorted in reverse order by the
    property given in the argument.
    """
    # See dictsort: sorted(key=..., reverse=True) avoids comparing the
    # items themselves on key ties, unlike decorate-sort-undecorate.
    return sorted(value, key=Variable(arg).resolve, reverse=True)
dictsortreversed.is_safe = False
def first(value):
    """Returns the leading item of the sequence, or u'' when it is empty."""
    try:
        return value[0]
    except IndexError:
        return u''
first.is_safe = False
def join(value, arg):
    """Joins a list with a string, like Python's ``str.join(list)``."""
    try:
        joined = arg.join(map(force_unicode, value))
    except AttributeError:  # arg isn't string-like: fail silently but nicely
        return value
    # Only keep the safe marking when every element was already safe.
    all_safe = reduce(lambda lhs, rhs: lhs and isinstance(rhs, SafeData),
                      value, True)
    if all_safe:
        return mark_safe(joined)
    return joined
join.is_safe = True
def last(value):
    """Returns the final item of the sequence, or u'' when it is empty."""
    try:
        return value[-1]
    except IndexError:
        return u''
last.is_safe = True
def length(value):
    """Returns the length of the value -- useful for lists and strings."""
    return len(value)
length.is_safe = True
def length_is(value, arg):
    """Returns whether the value's length equals the (integer) argument."""
    return len(value) == int(arg)
length_is.is_safe = True
def random(value):
    """Returns a random item from the list."""
    # Named 'random' for template use; the stdlib module is aliased as
    # random_module at file level to avoid the collision.
    return random_module.choice(value)
random.is_safe = True
def slice_(value, arg):
    """
    Returns a slice of the list.

    ``arg`` uses Python slice syntax, e.g. "1:3" or ":2". A malformed
    argument fails silently and returns the value unchanged.
    """
    try:
        parts = [int(piece) if piece else None
                 for piece in arg.split(u':')]
        return value[slice(*parts)]
    except (ValueError, TypeError):
        return value  # Fail silently.
slice_.is_safe = True
def unordered_list(value, autoescape=None):
    """
    Recursively takes a self-nested list and returns an HTML unordered list --
    WITHOUT opening and closing <ul> tags.

    The list is assumed to be in the proper format. For example, if ``var``
    contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
    then ``{{ var|unordered_list }}`` would return::

        <li>States
        <ul>
                <li>Kansas
                <ul>
                        <li>Lawrence</li>
                        <li>Topeka</li>
                </ul>
                </li>
                <li>Illinois</li>
        </ul>
        </li>
    """
    if autoescape:
        from django.utils.html import conditional_escape
        escaper = conditional_escape
    else:
        # No autoescaping requested: pass titles through untouched.
        escaper = lambda x: x
    def convert_old_style_list(list_):
        """
        Converts old style lists to the new easier to understand format.

        The old list format looked like:
            ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]

        And it is converted to:
            ['Item 1', ['Item 1.1', 'Item 1.2]]

        Returns a (converted_list, was_old_style) pair.
        """
        if not isinstance(list_, (tuple, list)) or len(list_) != 2:
            return list_, False
        first_item, second_item = list_
        if second_item == []:
            return [first_item], True
        old_style_list = True
        new_second_item = []
        for sublist in second_item:
            item, old_style_list = convert_old_style_list(sublist)
            if not old_style_list:
                # One non-old-style child means the whole list is new-style.
                break
            new_second_item.extend(item)
        if old_style_list:
            second_item = new_second_item
        return [first_item, second_item], old_style_list
    def _helper(list_, tabs=1):
        # Renders one nesting level; ``tabs`` controls the indent depth.
        indent = u'\t' * tabs
        output = []
        list_length = len(list_)
        i = 0
        # Manual index loop: a title and its following sub-list are consumed
        # together, so the index may advance by two per iteration.
        while i < list_length:
            title = list_[i]
            sublist = ''
            sublist_item = None
            if isinstance(title, (list, tuple)):
                sublist_item = title
                title = ''
            elif i < list_length - 1:
                next_item = list_[i+1]
                if next_item and isinstance(next_item, (list, tuple)):
                    # The next item is a sub-list.
                    sublist_item = next_item
                    # We've processed the next item now too.
                    i += 1
            if sublist_item:
                sublist = _helper(sublist_item, tabs+1)
                sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
                                                         indent, indent)
            output.append('%s<li>%s%s</li>' % (indent,
                    escaper(force_unicode(title)), sublist))
            i += 1
        return '\n'.join(output)
    value, converted = convert_old_style_list(value)
    return mark_safe(_helper(value))
unordered_list.is_safe = True
unordered_list.needs_autoescape = True
###################
# INTEGERS #
###################
def add(value, arg):
    """Adds the argument to the value; both are coerced to int first."""
    return int(value) + int(arg)
add.is_safe = False
def get_digit(value, arg):
    """
    Given a whole number, returns the ``arg``-th digit counted from the
    right (1 = rightmost digit, 2 = second-rightmost, etc.).

    Returns the original value for invalid input (non-integer value or
    argument, or argument < 1); returns 0 when the number has fewer digits
    than requested. Otherwise the result is always an integer.
    """
    try:
        position = int(arg)
        number = int(value)
    except ValueError:
        return value  # Fail silently for an invalid argument.
    if position < 1:
        return value
    try:
        return int(str(number)[-position])
    except IndexError:
        return 0
get_digit.is_safe = False
###################
# DATES #
###################
def date(value, arg=None):
    """
    Formats a date according to the given format string; defaults to the
    DATE_FORMAT setting. Empty/falsy input yields an empty string.
    """
    from django.utils.dateformat import format
    if not value:
        return u''
    if arg is None:
        arg = settings.DATE_FORMAT
    return format(value, arg)
date.is_safe = False
def time(value, arg=None):
    """
    Formats a time according to the given format string; defaults to the
    TIME_FORMAT setting. None or an empty string yields an empty string.
    """
    from django.utils.dateformat import time_format
    if value in (None, u''):
        return u''
    if arg is None:
        arg = settings.TIME_FORMAT
    return time_format(value, arg)
time.is_safe = False
def timesince(value, arg=None):
    """
    Formats a date as the time since that date (e.g. "4 days, 6 hours"),
    optionally relative to ``arg`` instead of now. Fails silently to u''
    on bad input.
    """
    from django.utils.timesince import timesince as timesince_impl
    if not value:
        return u''
    try:
        if arg:
            return timesince_impl(value, arg)
        return timesince_impl(value)
    except (ValueError, TypeError):
        return u''
timesince.is_safe = False
def timeuntil(value, arg=None):
    """
    Formats a date as the time until that date (e.g. "4 days, 6 hours"),
    optionally relative to ``arg`` instead of now. Fails silently to u''
    on bad input.
    """
    from django.utils.timesince import timeuntil as timeuntil_impl
    # Dropped the unused ``from datetime import datetime`` import.
    if not value:
        return u''
    try:
        return timeuntil_impl(value, arg)
    except (ValueError, TypeError):
        return u''
timeuntil.is_safe = False
###################
# LOGIC #
###################
def default(value, arg):
    """Returns the given default when the value is falsy."""
    return value or arg
default.is_safe = False
def default_if_none(value, arg):
    """Returns the given default only when the value is None."""
    return arg if value is None else value
default_if_none.is_safe = False
def divisibleby(value, arg):
    """Returns True if the value is divisible by the argument."""
    return int(value) % int(arg) == 0
divisibleby.is_safe = False
def yesno(value, arg=None):
    """
    Given a comma-separated string mapping values for true, false and
    (optionally) None, returns one of those strings according to the value:

    ==========  ======================  ==================================
    Value       Argument                Outputs
    ==========  ======================  ==================================
    ``True``    ``"yeah,no,maybe"``     ``yeah``
    ``False``   ``"yeah,no,maybe"``     ``no``
    ``None``    ``"yeah,no,maybe"``     ``maybe``
    ``None``    ``"yeah,no"``           ``"no"`` (None is treated as False
                                        when no mapping for None is given)
    ==========  ======================  ==================================

    An argument with fewer than two parts is invalid and returns the value
    unchanged.
    """
    if arg is None:
        arg = ugettext('yes,no,maybe')
    mapping = arg.split(u',')
    if len(mapping) < 2:
        return value  # Invalid arg.
    if len(mapping) != 3:
        # No usable "maybe" value: fall back to treating None like False.
        mapping = [mapping[0], mapping[1], mapping[1]]
    yes, no, maybe = mapping
    if value is None:
        return maybe
    return yes if value else no
yesno.is_safe = False
###################
# MISC #
###################
def filesizeformat(bytes):
    """
    Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
    102 bytes, etc).
    """
    try:
        bytes = float(bytes)
    except (TypeError, ValueError):
        # ValueError too: float('not a number') previously propagated
        # instead of failing silently like float(None) did.
        return u"0 bytes"
    if bytes < 1024:
        return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
    if bytes < 1024 * 1024:
        return ugettext("%.1f KB") % (bytes / 1024)
    if bytes < 1024 * 1024 * 1024:
        return ugettext("%.1f MB") % (bytes / (1024 * 1024))
    return ugettext("%.1f GB") % (bytes / (1024 * 1024 * 1024))
filesizeformat.is_safe = True
def pluralize(value, arg=u's'):
    """
    Returns a plural suffix if the value is not 1; by default the suffix
    is 's':

    * vote{{ value|pluralize }} -> "0 votes", "1 vote", "2 votes".

    A plain argument replaces the plural suffix:

    * class{{ value|pluralize:"es" }} -> "0 classes", "1 class".

    An argument containing a comma supplies singular and plural suffixes:

    * cand{{ value|pluralize:"y,ies" }} -> "0 candies", "1 candy".

    More than two comma-separated parts is invalid and yields u''.
    """
    if u',' not in arg:
        arg = u',' + arg
    suffixes = arg.split(u',')
    if len(suffixes) > 2:
        return u''
    singular, plural = suffixes[:2]
    try:
        return plural if int(value) != 1 else singular
    except ValueError:  # Invalid string that's not a number.
        pass
    except TypeError:  # Not a string or number; maybe a sized collection?
        try:
            return plural if len(value) != 1 else singular
        except TypeError:  # len() of unsized object.
            pass
    return singular
pluralize.is_safe = False
def phone2numeric(value):
    """Converts a phone number with letters into its numeric equivalent."""
    from django.utils.text import phone2numeric as phone2numeric_impl
    return phone2numeric_impl(value)
phone2numeric.is_safe = True
def pprint(value):
    """A wrapper around pprint.pformat -- for debugging, really."""
    from pprint import pformat
    try:
        return pformat(value)
    except Exception, e:
        # pformat can raise on exotic objects; report instead of crashing
        # the template render.
        return u"Error in formatting: %s" % force_unicode(e, errors="replace")
pprint.is_safe = True
# Syntax: register.filter(name of filter, callback)
register.filter(add)
register.filter(addslashes)
register.filter(capfirst)
register.filter(center)
register.filter(cut)
register.filter(date)
register.filter(default)
register.filter(default_if_none)
register.filter(dictsort)
register.filter(dictsortreversed)
register.filter(divisibleby)
register.filter(escape)
register.filter(escapejs)
register.filter(filesizeformat)
register.filter(first)
register.filter(fix_ampersands)
register.filter(floatformat)
register.filter(force_escape)
register.filter(get_digit)
register.filter(iriencode)
register.filter(join)
register.filter(last)
register.filter(length)
register.filter(length_is)
register.filter(linebreaks)
register.filter(linebreaksbr)
register.filter(linenumbers)
register.filter(ljust)
register.filter(lower)
register.filter(make_list)
register.filter(phone2numeric)
register.filter(pluralize)
register.filter(pprint)
register.filter(removetags)
register.filter(random)
register.filter(rjust)
register.filter(safe)
register.filter('slice', slice_)
register.filter(slugify)
register.filter(stringformat)
register.filter(striptags)
register.filter(time)
register.filter(timesince)
register.filter(timeuntil)
register.filter(title)
register.filter(truncatewords)
register.filter(truncatewords_html)
register.filter(unordered_list)
register.filter(upper)
register.filter(urlencode)
register.filter(urlize)
register.filter(urlizetrunc)
register.filter(wordcount)
register.filter(wordwrap)
register.filter(yesno)
| lgpl-3.0 |
dgarros/ansible | lib/ansible/modules/files/blockinfile.py | 24 | 10652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: blockinfile
author:
- 'YAEGASHI Takeshi (@yaegashi)'
extends_documentation_fragment:
- files
- validate
short_description: Insert/update/remove a text block
surrounded by marker lines.
version_added: '2.0'
description:
- This module will insert/update/remove a block of multi-line text
surrounded by customizable marker lines.
options:
path:
aliases: [ dest, destfile, name ]
required: true
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
state:
required: false
choices: [ present, absent ]
default: present
description:
- Whether the block should be there or not.
marker:
required: false
default: '# {mark} ANSIBLE MANAGED BLOCK'
description:
- The marker line template.
"{mark}" will be replaced with "BEGIN" or "END".
block:
aliases: [ content ]
required: false
default: ''
description:
- The text to insert inside the marker lines.
If it's missing or an empty string,
the block will be removed as if C(state) were specified to C(absent).
insertafter:
required: false
default: EOF
description:
- If specified, the block will be inserted after the last match of
specified regular expression. A special value is available; C(EOF) for
inserting the block at the end of the file. If specified regular
expression has no matches, C(EOF) will be used instead.
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
default: None
description:
- If specified, the block will be inserted before the last match of
specified regular expression. A special value is available; C(BOF) for
inserting the block at the beginning of the file. If specified regular
expression has no matches, the block will be inserted at the end of the
file.
choices: [ 'BOF', '*regex*' ]
create:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a new file if it doesn't exist.
backup:
required: false
default: 'no'
choices: [ 'yes', 'no' ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
version_added: "2.1"
notes:
- This module supports check mode.
- When using 'with_*' loops be aware that if you do not set a unique mark the block will be overwritten on each iteration.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest' or 'name' was used instead of 'path'
- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config
blockinfile:
path: /etc/ssh/sshd_config
block: |
Match User ansible-agent
PasswordAuthentication no
- name: insert/update eth0 configuration stanza in /etc/network/interfaces
(it might be better to copy files into /etc/network/interfaces.d/)
blockinfile:
path: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.0.2.23
netmask 255.255.255.0
- name: insert/update configuration using a local file
blockinfile:
block: "{{ lookup('file', './local/ssh_config') }}"
dest: "/etc/ssh/ssh_config"
backup: yes
- name: insert/update HTML surrounded by custom markers after <body> line
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
insertafter: "<body>"
content: |
<h1>Welcome to {{ ansible_hostname }}</h1>
<p>Last updated on {{ ansible_date_time.iso8601 }}</p>
- name: remove HTML as well as surrounding markers
blockinfile:
path: /var/www/html/index.html
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
content: ""
- name: Add mappings to /etc/hosts
blockinfile:
path: /etc/hosts
block: |
{{ item.ip }} {{ item.name }}
marker: "# {mark} ANSIBLE MANAGED BLOCK {{ item.name }}"
with_items:
- { name: host1, ip: 10.10.1.10 }
- { name: host2, ip: 10.10.1.11 }
- { name: host3, ip: 10.10.1.12 }
"""
import re
import os
import tempfile
from ansible.module_utils.six import b
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def write_changes(module, contents, path):
    """
    Write ``contents`` to ``path`` atomically, optionally running the
    module's 'validate' command against a temporary copy first.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    with os.fdopen(tmpfd, 'wb') as f:
        f.write(contents)
    validate = module.params.get('validate', None)
    if not validate:
        module.atomic_move(tmpfile, path,
                           unsafe_writes=module.params['unsafe_writes'])
        return
    # The validate command template must reference the temp file via %s.
    if "%s" not in validate:
        module.fail_json(msg="validate must contain %%s: %s" % (validate))
    (rc, out, err) = module.run_command(validate % tmpfile)
    if rc != 0:
        module.fail_json(msg='failed to validate: '
                             'rc:%s error:%s' % (rc, err))
    module.atomic_move(tmpfile, path,
                       unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
    """
    Apply the common file attributes (owner, perms, SELinux context) and
    fold any resulting change into the (message, changed) pair.
    """
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_file_attributes_if_different(file_args, False)
    if attrs_changed:
        if changed:
            message += " and "
        message += "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    """Entry point: insert/update/remove a marker-delimited block in a file."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
            state=dict(default='present', choices=['absent', 'present']),
            marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
            block=dict(default='', type='str', aliases=['content']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )
    params = module.params
    path = params['path']
    if module.boolean(params.get('follow', None)):
        path = os.path.realpath(path)
    if os.path.isdir(path):
        module.fail_json(rc=256,
                         msg='Path %s is a directory !' % path)
    # Read the current file contents (bytes), or start empty when the file
    # is missing and create=yes.
    path_exists = os.path.exists(path)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257,
                             msg='Path %s does not exist !' % path)
        original = None
        lines = []
    else:
        f = open(path, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()
    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'
    if not present and not path_exists:
        module.exit_json(changed=False, msg="File %s not present" % path)
    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'
    # Compile the anchor regex; None means positional EOF/BOF handling.
    if insertafter not in (None, 'EOF'):
        insertre = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(insertbefore)
    else:
        insertre = None
    # Concrete BEGIN/END marker lines derived from the marker template.
    marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
    marker1 = re.sub(b(r'{mark}'), b('END'), marker)
    if present and block:
        # Escape seqeuences like '\n' need to be handled in Ansible 1.x
        # NOTE(review): this re.sub call is a no-op as written — looks like
        # the escape-handling pattern was lost; confirm against upstream.
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        blocklines = []
    # Locate any existing BEGIN (n0) and END (n1) marker lines.
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line == marker0:
            n0 = i
        if line == marker1:
            n1 = i
    if None in (n0, n1):
        # No complete existing block: pick an insertion point from the
        # anchor regex, else BOF/EOF.
        n0 = None
        if insertre is not None:
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0 # insertbefore=BOF
        else:
            n0 = len(lines) # insertafter=EOF
    elif n0 < n1:
        # Remove the existing block (markers included) before re-inserting.
        lines[n0:n1+1] = []
    else:
        # Markers in reverse order: remove the inverted span.
        lines[n1:n0+1] = []
        n0 = n1
    lines[n0:n0] = blocklines
    if lines:
        result = b('\n').join(lines)
        # Preserve (or add, for new files) the trailing newline.
        if original is None or original.endswith(b('\n')):
            result += b('\n')
    else:
        # NOTE(review): '' is text while the branch above yields bytes —
        # verify the original==result comparison and write path on Python 3.
        result = ''
    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True
    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            module.backup_local(path)
        write_changes(module, result, path)
    if module.check_mode and not path_exists:
        module.exit_json(changed=changed, msg=msg)
    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 |
TheSimoms/Felleshoelet | spotifyconnector/venv/lib/python3.6/site-packages/pip/_vendor/urllib3/connection.py | 7 | 15170 | from __future__ import absolute_import
import datetime
import logging
import os
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try:
# Python 3: not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError:
# Python 2
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket,
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)
port_by_scheme = {"http": 80, "https": 443}
# When it comes time to update this value as a part of regular maintenance
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2019, 1, 1)
class DummyConnection(object):
    """Sentinel connection class used to detect a failed ConnectionCls import."""

    pass
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

    - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.
    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
      defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
      Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

      For example, if you wish to enable TCP Keep Alive in addition to the defaults,
      you might pass::

          HTTPConnection.default_socket_options + [
              (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
          ]

      Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port = port_by_scheme["http"]

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        # Python 3 httplib no longer accepts 'strict'; drop it silently.
        if not six.PY2:
            kw.pop("strict", None)

        # Pre-set source_address.
        self.source_address = kw.get("source_address")

        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop("socket_options", self.default_socket_options)

        _HTTPConnection.__init__(self, *args, **kw)

    @property
    def host(self):
        """
        Getter method to remove any trailing dots that indicate the hostname is an FQDN.

        In general, SSL certificates don't include the trailing dot indicating a
        fully-qualified domain name, and thus, they don't validate properly when
        checked against a domain name that includes the dot. In addition, some
        servers may not expect to receive the trailing dot when provided.

        However, the hostname with trailing dot is critical to DNS resolution; doing a
        lookup with the trailing dot will properly only resolve the appropriate FQDN,
        whereas a lookup without a trailing dot will search the system's search domain
        list. Thus, it's important to keep the original host around for use only in
        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
        actual TCP connection across which we're going to send HTTP requests).
        """
        return self._dns_host.rstrip(".")

    @host.setter
    def host(self, value):
        """
        Setter for the `host` property.

        We assume that only urllib3 uses the _dns_host attribute; httplib itself
        only uses `host`, and it seems reasonable that other libraries follow suit.
        """
        self._dns_host = value

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw["source_address"] = self.source_address

        if self.socket_options:
            extra_kw["socket_options"] = self.socket_options

        try:
            # Connect using the un-stripped _dns_host (see the host property).
            conn = connection.create_connection(
                (self._dns_host, self.port), self.timeout, **extra_kw
            )

        except SocketTimeout:
            raise ConnectTimeoutError(
                self,
                "Connection to %s timed out. (connect timeout=%s)"
                % (self.host, self.timeout),
            )

        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e
            )

        return conn

    def _prepare_conn(self, conn):
        self.sock = conn
        # Google App Engine's httplib does not define _tunnel_host
        if getattr(self, "_tunnel_host", None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

    def request_chunked(self, method, url, body=None, headers=None):
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        headers = HTTPHeaderDict(headers if headers is not None else {})
        skip_accept_encoding = "accept-encoding" in headers
        skip_host = "host" in headers
        self.putrequest(
            method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
        )
        for header, value in headers.items():
            self.putheader(header, value)
        if "transfer-encoding" not in headers:
            self.putheader("Transfer-Encoding", "chunked")
        self.endheaders()

        if body is not None:
            stringish_types = six.string_types + (bytes,)
            if isinstance(body, stringish_types):
                body = (body,)
            for chunk in body:
                if not chunk:
                    continue
                if not isinstance(chunk, bytes):
                    chunk = chunk.encode("utf8")
                # Each chunk: hex length, CRLF, data, CRLF (RFC 7230 chunked coding).
                len_str = hex(len(chunk))[2:]
                self.send(len_str.encode("utf-8"))
                self.send(b"\r\n")
                self.send(chunk)
                self.send(b"\r\n")

        # After the if clause, to always have a closed body
        self.send(b"0\r\n\r\n")
class HTTPSConnection(HTTPConnection):
    """HTTPConnection that wraps its socket in TLS on connect().

    This variant does NOT verify the server certificate unless a verifying
    ssl_context is supplied; see VerifiedHTTPSConnection below.
    """

    default_port = port_by_scheme["https"]
    ssl_version = None

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        key_password=None,
        strict=None,
        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
        ssl_context=None,
        server_hostname=None,
        **kw
    ):

        HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password
        self.ssl_context = ssl_context
        self.server_hostname = server_hostname

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = "https"

    def connect(self):
        """Open a socket and wrap it in TLS."""
        conn = self._new_conn()
        self._prepare_conn(conn)

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        context = self.ssl_context
        if (
            not self.ca_certs
            and not self.ca_cert_dir
            and default_ssl_context
            and hasattr(context, "load_default_certs")
        ):
            context.load_default_certs()

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ssl_context=self.ssl_context,
            server_hostname=self.server_hostname,
        )
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """

    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ssl_version = None
    assert_fingerprint = None

    def set_cert(
        self,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
    ):
        """
        This method should only be called once, before the connection is used.
        """
        # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
        # have an SSLContext object in which case we'll use its verify_mode.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # expanduser so "~/certs/ca.pem"-style paths work.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)

    def connect(self):
        """Open a socket, wrap it in TLS, and verify the peer certificate."""
        # Add certificate verification
        conn = self._new_conn()
        hostname = self.host

        # Google App Engine's httplib does not define _tunnel_host
        if getattr(self, "_tunnel_host", None):
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        server_hostname = hostname
        if self.server_hostname is not None:
            server_hostname = self.server_hostname

        # A badly skewed clock makes certificate validity checks fail in
        # confusing ways; warn up front.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn(
                (
                    "System time is way off (before {0}). This will probably "
                    "lead to SSL verification errors"
                ).format(RECENT_DATE),
                SystemTimeWarning,
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)

        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        if (
            not self.ca_certs
            and not self.ca_cert_dir
            and default_ssl_context
            and hasattr(context, "load_default_certs")
        ):
            context.load_default_certs()

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            server_hostname=server_hostname,
            ssl_context=context,
        )

        if self.assert_fingerprint:
            # Pinned fingerprint takes precedence over hostname matching.
            assert_fingerprint(
                self.sock.getpeercert(binary_form=True), self.assert_fingerprint
            )
        elif (
            context.verify_mode != ssl.CERT_NONE
            and not getattr(context, "check_hostname", False)
            and self.assert_hostname is not False
        ):
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get("subjectAltName", ()):
                warnings.warn(
                    (
                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
                        "`commonName` for now. This feature is being removed by major browsers and "
                        "deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 "
                        "for details.)".format(hostname)
                    ),
                    SubjectAltNameWarning,
                )
            _match_hostname(cert, self.assert_hostname or server_hostname)

        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED
            or self.assert_fingerprint is not None
        )
def _match_hostname(cert, asserted_hostname):
    """Match *cert* against *asserted_hostname*; on mismatch, log and re-raise
    the CertificateError with the peer certificate attached as ``_peer_cert``.
    """
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as e:
        log.warning(
            "Certificate did not match expected hostname: %s. " "Certificate: %s",
            asserted_hostname,
            cert,
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        e._peer_cert = cert
        raise
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    # Export the certificate-verifying variant as the default HTTPSConnection.
    HTTPSConnection = VerifiedHTTPSConnection
else:
    # No ssl module available: substitute a placeholder class.
    # NOTE(review): DummyConnection is defined elsewhere in this module.
    HTTPSConnection = DummyConnection
| gpl-2.0 |
yrchen/CommonRepo | commonrepo/users_api/views.py | 1 | 2534 | # -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
"""
View configurations of user information for Users APIs in Common Repo project.
"""
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework_tracking.mixins import LoggingMixin
from commonrepo.users.models import User as User
from .permissions import IsOwnerOrReadOnly
from .serializers import UserSerializer, UserSerializerV2, UserLiteSerializer
__author__ = 'yrchen@ATCity.org (Xaver Y.R. Chen)'
class UserViewSet(LoggingMixin, viewsets.ModelViewSet):
    """
    This endpoint presents the users in the system. (API version 1)

    As you can see, the collection of snippet instances owned by a user are
    serialized using a hyperlinked representation.
    """
    # LoggingMixin records every request made against this viewset.
    queryset = User.objects.all()
    serializer_class = UserSerializer
    # Write access is limited to the owner; anyone may read.
    permission_classes = [IsOwnerOrReadOnly]
class UserViewSetV2(LoggingMixin, viewsets.ModelViewSet):
    """
    This endpoint presents the users in the system. (API version 2)

    As you can see, the collection of ELOs and Groups instances owned by a user
    are serialized using normal model serializer representation.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializerV2
    permission_classes = [IsOwnerOrReadOnly]

    def list(self, request):
        """Return all users with the lightweight serializer.

        NOTE(review): this override serializes the full queryset directly,
        bypassing DRF pagination/filtering — confirm that is intended.
        """
        queryset = User.objects.all()
        serializer = UserLiteSerializer(queryset, many=True)
        return Response(serializer.data)
| apache-2.0 |
MingdaZhou/gnuradio | gr-utils/python/modtool/gr-newmod/docs/doxygen/doxyxml/generated/index.py | 344 | 1871 | #!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
import os
import sys
import compound
import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):

    def __init__(self, version=None, compound=None):
        supermod.DoxygenType.__init__(self, version, compound)

    def find_compounds_and_members(self, details):
        """
        Returns a list of all compounds and their members which match details
        """
        results = []
        # `comp` rather than `compound` so the imported module isn't shadowed.
        for comp in self.compound:
            members = comp.find_members(details)
            if members:
                results.append([comp, members])
            elif details.match(comp):
                # Compound itself matches even though no member does.
                results.append([comp, []])
        return results
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):

    def __init__(self, kind=None, refid=None, name='', member=None):
        supermod.CompoundType.__init__(self, kind, refid, name, member)

    def find_members(self, details):
        """
        Returns a list of all members which match details
        """
        return [member for member in self.member if details.match(member)]
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
    """Concrete subclass of the generated MemberType (adds no behaviour)."""

    def __init__(self, kind=None, refid=None, name=''):
        supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
def parse(inFilename):
    """Parse a Doxygen index XML file and build a DoxygenType object tree."""
    root_node = minidom.parse(inFilename).documentElement
    root_obj = supermod.DoxygenType.factory()
    root_obj.build(root_node)
    return root_obj
| gpl-3.0 |
procoder317/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Plot the classifier's decision surface plus the weighted samples.

    Point size is scaled by ``sample_weight``.
    NOTE(review): reads the module-level globals ``X`` and ``Y`` defined
    below instead of taking the data as parameters — confirm intended.
    """
    # plot the decision function
    xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))

    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)

    axis.axis('off')
    axis.set_title(title)
# we create 20 points: two Gaussian blobs of 10 points each, labelled +1/-1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15

# for reference, first fit without class weights

# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)

clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)

# side-by-side comparison: unweighted vs. outlier-weighted decision surfaces
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")

plt.show()
| bsd-3-clause |
09zwcbupt/undergrad_thesis | ext/poxdesk/qx/tool/pylib/pyparse/pyparsing.py | 8 | 153237 | # module pyparsing.py
#
# Copyright (c) 2003-2010 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.5.5"
__versionTime__ = "12 Aug 2010 03:56"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'getTokensEndLoc', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor',
]
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
_PY3K = sys.version_info[0] > 2
if _PY3K:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
_str2dict = set
alphas = string.ascii_lowercase + string.ascii_uppercase
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
alphas = string.lowercase + string.uppercase
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
try:
    import __builtin__
except ImportError:
    # Python 3 renamed __builtin__ to builtins; the unconditional py2 import
    # would otherwise raise ImportError even though this module supports py3.
    import builtins as __builtin__
for fname in "sum len enumerate sorted reversed list tuple set any all".split():
    try:
        singleArgBuiltins.append(getattr(__builtin__,fname))
    except AttributeError:
        continue
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ['&'+s+';' for s in "amp gt lt quot apos".split()]
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    """Empty namespace class used as a plain attribute container."""
    pass

# Character-class strings used when building Word expressions.
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
# All printable characters except whitespace.
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # single-argument form: pstr is actually the message
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # lineno/col/line are computed lazily from loc+pstr on first access
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column],
                                markerString, line_str[line_column:]])
        return line_str.strip()
    def __dir__(self):
        # NOTE(review): "markInputLine" here mis-spells the actual method
        # name markInputline — looks like a typo in the advertised dir().
        return "loc msg pstr parserElement lineno col line " \
               "markInputLine __str__ __repr__".split()
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
       supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
    """
    # Recoverable: alternatives (MatchFirst/Or) catch this and keep trying.
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # Unlike ParseException, this is not caught by alternative matching.
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{ParseFatalException}, but thrown internally when an
       C{ErrorStop} ('-' operator) indicates that parsing is to stop immediately because
       an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # Copy location/message/element from the originating ParseException.
        super(ParseSyntaxException, self).__init__(
                                        pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """exception thrown by C{validate()} if the grammar could be improperly recursive"""

    def __init__(self, parseElementList):
        # The chain of parser elements that demonstrates the recursion.
        self.parseElementTrace = parseElementList

    def __str__(self):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (C{len(results)})
       - by list index (C{results[0], results[1]}, etc.)
       - by attribute (C{results.<resultsName>})
       """
    #~ __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # Passing an existing ParseResults through the constructor returns it
        # unchanged; __doinit tells __init__ whether to initialise fields.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        # register the results name (if any) against the token list
        if name is not None and name:
            if not modal:
                # non-modal names accumulate all values instead of the last
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist

    def __getitem__( self, i ):
        # integer/slice -> positional lookup; anything else -> named lookup
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v, isinstance=isinstance ):
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weakref avoids a parent<->child reference cycle
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
            for name in self.__tokdict:
                occurrences = self.__tokdict[name]
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            del self.__tokdict[i]

    def __contains__( self, k ):
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return len( self.__toklist ) > 0
    __nonzero__ = __bool__
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()

    def pop( self, index=-1 ):
        """Removes and returns item at specified index (default=last).
           Will work with either numeric indices or dict-key indicies."""
        ret = self[index]
        del self[index]
        return ret

    def get(self, key, defaultValue=None):
        """Returns named result matching the given key, or if there is no
           such name, then returns the given C{defaultValue} or C{None} if no
           C{defaultValue} is specified."""
        if key in self:
            return self[key]
        else:
            return defaultValue

    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary
        for name in self.__tokdict:
            occurrences = self.__tokdict[name]
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict]

    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]

    def __getattr__( self, name ):
        # attribute access falls back to named-result lookup; unknown names
        # return "" rather than raising AttributeError
        if True: #name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None

    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret

    def __iadd__( self, other ):
        if other.__tokdict:
            offset = len(self.__toklist)
            # shift other's named-result positions past our current tokens;
            # negative positions (end-relative) are preserved as end-relative
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        # supports sum() over ParseResults, which starts from 0
        if isinstance(other,int) and other == 0:
            return self.copy()

    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out

    def _asStringList( self, sep='' ):
        # flatten nested results into a flat list of strings
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out

    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )

    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret

    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token position -> results name, for labelling child elements
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist ] )
        nextLevelIndent = indent + " "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                xmlBodyText,
                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)

    def __lookup(self,sub):
        # reverse lookup: find the results name under which `sub` is stored
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None

    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent()
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               # NOTE(review): indexing .values()/.keys() is a py2-ism; on
               # py3 dict views are not indexable — confirm py3 code path.
               self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None

    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+_ustr(self.asList()) )
        keys = self.items()
        # NOTE(review): list.sort() on items() is a py2-ism; py3 items()
        # returns a view with no .sort() — confirm py3 code path.
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    out.append( v.dump(indent,depth+1) )
                else:
                    out.append(_ustr(v))
            else:
                out.append(_ustr(v))
        return "".join(out)

    # add support for pickle protocol
    def __getstate__(self):
        # weakref parent cannot be pickled; dereference it here and
        # re-wrap in __setstate__
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        par, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __dir__(self):
        return dir(super(ParseResults,self)) + self.keys()
def col (loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # A location sitting on a newline is reported as column 1 of the next line.
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.

   Note: the default parsing behavior is to expand tabs in the input string
   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
   on parsing strings containing <TAB>s, and suggested methods to maintain a
   consistent view of the parsed string, the parse location, and line and column
   positions within the parsed string.
   """
    # One more than the number of newlines strictly before loc.
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
    """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    # No following newline means loc sits on the last line.
    return strg[start:] if end < 0 else strg[start:end]
def _defaultStartDebugAction( instring, loc, expr ):
    # Default hook fired before a match attempt: report what and where.
    position = "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
    print ("Match " + _ustr(expr) + " at loc " + _ustr(loc) + position)
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # Default hook fired after a successful match: report the tokens produced.
    message = "Matched " + _ustr(expr) + " -> " + str(toks.asList())
    print (message)
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # Default hook fired when a match attempt raises: report the exception.
    message = "Exception raised:" + _ustr(exc)
    print (message)
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    # Intentionally ignores all arguments.
    return None
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
    """Initialize the state shared by all parser elements.

    savelist: when True, this element's matched tokens are kept as a list
    in the results (subclasses pass this up via super().__init__).
    """
    self.parseAction = list()          # callables run on a successful match
    self.failAction = None             # optional callable run on a failed match
    #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
    self.strRepr = None                # cached string representation
    self.resultsName = None
    self.saveAsList = savelist
    self.skipWhitespace = True
    self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    self.copyDefaultWhiteChars = True  # copy() re-reads the class default if True
    self.mayReturnEmpty = False # used when checking for left-recursion
    self.keepTabs = False
    self.ignoreExprs = list()          # expressions (e.g. comments) skipped before matching
    self.debug = False
    self.streamlined = False
    self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
    self.errmsg = ""
    self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
    self.debugActions = ( None, None, None ) #custom debug actions
    self.re = None
    self.callPreparse = True # used to avoid redundant calls to preParse
    self.callDuringTry = False         # run parse actions even during lookahead/try parses
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
    """Internal method used to decorate parse actions that take fewer than 3 arguments,
    so that all parse actions can be called as C{f(s,l,t)}.

    Determines how many positional arguments C{f} accepts (discounting the
    implicit self of bound methods and callable instances) and, when fewer
    than 3, returns an adapter with the full (s,l,t) signature.
    """
    STAR_ARGS = 4   # CO_VARARGS bit of co_flags: *args callables accept anything
    # special handling for single-argument builtins
    if (f in singleArgBuiltins):
        numargs = 1
    else:
        try:
            restore = None
            if isinstance(f,type):
                # for classes, inspect the constructor instead
                restore = f
                f = f.__init__
            if not _PY3K:
                codeObj = f.func_code
            else:
                # bugfix: Python 3 spells this attribute __code__ ("f.code"
                # always raised AttributeError, forcing the fallback path)
                codeObj = f.__code__
            if codeObj.co_flags & STAR_ARGS:
                return f
            numargs = codeObj.co_argcount
            if not _PY3K:
                if hasattr(f,"im_self"):
                    numargs -= 1
            else:
                if hasattr(f,"__self__"):
                    numargs -= 1
            if restore:
                f = restore
        except AttributeError:
            try:
                if not _PY3K:
                    call_im_func_code = f.__call__.im_func.func_code
                else:
                    call_im_func_code = f.__code__
                # not a function, must be a callable object, get info from the
                # im_func binding of its bound __call__ method
                if call_im_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_im_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        # bugfix: was "numargs -= 0", which failed to discount
                        # the bound self argument of the __call__ method
                        numargs -= 1
            except AttributeError:
                if not _PY3K:
                    call_func_code = f.__call__.func_code
                else:
                    call_func_code = f.__call__.__code__
                # not a bound method, get info directly from __call__ method
                if call_func_code.co_flags & STAR_ARGS:
                    return f
                numargs = call_func_code.co_argcount
                if not _PY3K:
                    if hasattr(f.__call__,"im_self"):
                        numargs -= 1
                else:
                    if hasattr(f.__call__,"__self__"):
                        numargs -= 1
    #~ print ("adding function %s with %d args" % (f.func_name,numargs))
    if numargs == 3:
        return f
    else:
        if numargs > 3:
            def tmp(s,l,t):
                return f(f.__call__.__self__, s,l,t)
        if numargs == 2:
            def tmp(s,l,t):
                return f(l,t)
        elif numargs == 1:
            def tmp(s,l,t):
                return f(t)
        else: #~ numargs == 0:
            def tmp(s,l,t):
                return f()
        # Preserve the wrapped callable's metadata where it exists.
        try:
            tmp.__name__ = f.__name__
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        try:
            tmp.__doc__ = f.__doc__
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        try:
            tmp.__dict__.update(f.__dict__)
        except (AttributeError,TypeError):
            # no need for special handling if attribute doesnt exist
            pass
        return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
    """Define action to perform when successfully matching parse element definition.
    Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
    C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
     - s   = the original string being parsed (see note below)
     - loc = the location of the matching substring
     - toks = a list of the matched tokens, packaged as a ParseResults object
    If the functions in fns modify the tokens, they can return them as the return
    value from fn, and the modified list of tokens will replace the original.
    Otherwise, fn does not need to return any value.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process.  See L{I{parseString}<parseString>} for more information
    on parsing strings containing <TAB>s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and column
    positions within the parsed string.
    """
    # Replace (not extend) the action list; each fn is normalized to (s,l,t).
    self.parseAction = [self._normalizeParseActionArgs(fn) for fn in fns]
    self.callDuringTry = kwargs.get("callDuringTry", False)
    return self
def addParseAction( self, *fns, **kwargs ):
    """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
    # Extend the existing action list rather than replacing it.
    self.parseAction += [self._normalizeParseActionArgs(fn) for fn in fns]
    self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    return self
def setFailAction( self, fn ):
    """Define action to perform if parsing fails at this expression.
    Fail acton fn is a callable function that takes the arguments
    C{fn(s,loc,expr,err)} where:
     - s = string being parsed
     - loc = location where expression match was attempted and failed
     - expr = the parse expression that failed
     - err = the exception thrown
    The function returns no value.  It may throw C{ParseFatalException}
    if it is desired to stop parsing immediately."""
    self.failAction = fn
    return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
    # Default implementation: match nothing, consume no input, produce no tokens.
    # Subclasses override this with their actual matching logic.
    return loc, []
def postParse( self, instring, loc, tokenlist ):
    # Hook for subclasses to massage the matched tokens; default is a no-op.
    return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
    """Core match sequence: preParse -> parseImpl -> postParse -> parse actions.

    Returns (new location, ParseResults).  The debug/failAction branch and the
    normal branch duplicate the preParse/parseImpl steps so the fast path pays
    no debugging overhead.
    """
    debugging = ( self.debug ) #and doActions )

    if debugging or self.failAction:
        #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
        if (self.debugActions[0] ):
            self.debugActions[0]( instring, loc, self )
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        try:
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                # ran off the end of instring: report as an ordinary parse failure
                raise ParseException( instring, len(instring), self.errmsg, self )
        except ParseBaseException:
            #~ print ("Exception raised:", err)
            err = None
            if self.debugActions[2]:
                err = sys.exc_info()[1]
                self.debugActions[2]( instring, tokensStart, self, err )
            if self.failAction:
                if err is None:
                    err = sys.exc_info()[1]
                self.failAction( instring, tokensStart, self, err )
            raise
    else:
        if callPreParse and self.callPreparse:
            preloc = self.preParse( instring, loc )
        else:
            preloc = loc
        tokensStart = preloc
        # mayIndexError lets subclasses that never over-index skip the try/except
        if self.mayIndexError or loc >= len(instring):
            try:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
            except IndexError:
                raise ParseException( instring, len(instring), self.errmsg, self )
        else:
            loc,tokens = self.parseImpl( instring, preloc, doActions )

    tokens = self.postParse( instring, loc, tokens )

    retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
    if self.parseAction and (doActions or self.callDuringTry):
        if debugging:
            try:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        # an action returning a value replaces the token list
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
            except ParseBaseException:
                #~ print "Exception raised in user parse action:", err
                if (self.debugActions[2] ):
                    err = sys.exc_info()[1]
                    self.debugActions[2]( instring, tokensStart, self, err )
                raise
        else:
            for fn in self.parseAction:
                tokens = fn( instring, tokensStart, retTokens )
                if tokens is not None:
                    retTokens = ParseResults( tokens,
                                              self.resultsName,
                                              asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                              modal=self.modalResults )

    if debugging:
        #~ print ("Matched",self,"->",retTokens.asList())
        if (self.debugActions[1] ):
            self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

    return loc, retTokens
def tryParse( self, instring, loc ):
    # Lookahead parse without running parse actions; returns only the end
    # location.  Fatal exceptions are downgraded to ordinary ParseExceptions
    # so callers can treat them as a plain non-match.
    try:
        return self._parse( instring, loc, doActions=False )[0]
    except ParseFatalException:
        raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
    # Packrat memoization: key on every argument that can affect the outcome.
    lookup = (self,instring,loc,callPreParse,doActions)
    if lookup in ParserElement._exprArgCache:
        value = ParserElement._exprArgCache[ lookup ]
        if isinstance(value,Exception):
            # failures are memoized too: re-raise the recorded exception
            raise value
        return value
    else:
        try:
            value = self._parseNoCache( instring, loc, doActions, callPreParse )
            # cache a copy of the results so later mutation by the caller
            # cannot corrupt the memoized value
            ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
            return value
        except ParseBaseException:
            pe = sys.exc_info()[1]
            ParserElement._exprArgCache[ lookup ] = pe
            raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring, parseAll=False ):
    """Execute the parse expression with the given string.
    This is the main interface to the client code, once the complete
    expression has been built.

    If you want the grammar to require that the entire input string be
    successfully parsed, then set C{parseAll} to True (equivalent to ending
    the grammar with C{StringEnd()}).

    Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
    in order to report proper column numbers in parse actions.
    If the input string contains tabs and
    the grammar uses parse actions that use the C{loc} argument to index into the
    string being parsed, you can ensure you have a consistent view of the input
    string by:
     - calling C{parseWithTabs} on your grammar before calling C{parseString}
       (see L{I{parseWithTabs}<parseWithTabs>})
     - define your parse action using the full C{(s,loc,toks)} signature, and
       reference the input string using the parse action's C{s} argument
     - explictly expand the tabs in your input string before calling
       C{parseString}
    """
    # A fresh top-level parse invalidates any packrat-cached results.
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        #~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        # expand tabs so reported column numbers match the parsed text
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse( instring, 0 )
        if parseAll:
            #loc = self.preParse( instring, loc )
            # require the match to reach end-of-string
            se = StringEnd()
            se._parse( instring, loc )
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
    else:
        return tokens
def scanString( self, instring, maxMatches=_MAX_INT ):
    """Scan the input string for expression matches.  Each match will return the
    matching tokens, start location, and end location.  May be called with optional
    C{maxMatches} argument, to clip scanning after 'n' matches are found.

    Note that the start and end locations are reported relative to the string
    being parsed.  See L{I{parseString}<parseString>} for more information on parsing
    strings with embedded tabs.

    Yields (tokens, startLoc, endLoc) triples; this method is a generator.
    """
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()

    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    # hoist the bound methods out of the scan loop
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn( instring, loc )
                nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
            except ParseException:
                # NOTE(review): if preparseFn itself raised, 'preloc' would be
                # unbound here -- assumes preParse never raises ParseException;
                # confirm against preParse/_skipIgnorables behavior.
                loc = preloc+1
            else:
                if nextLoc > loc:
                    matches += 1
                    yield tokens, preloc, nextLoc
                    loc = nextLoc
                else:
                    # zero-width match: advance by one to guarantee progress
                    loc = preloc+1
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def transformString( self, instring ):
    """Extension to C{scanString}, to modify matching text with modified tokens that may
    be returned from a parse action.  To use C{transformString}, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking C{transformString()} on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action.  C{transformString()} returns the resulting transformed string."""
    out = []
    lastE = 0
    # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
    # keep string locs straight between transformString and scanString
    self.keepTabs = True
    try:
        for t,s,e in self.scanString( instring ):
            # copy the unmatched text preceding this match verbatim
            out.append( instring[lastE:s] )
            if t:
                if isinstance(t,ParseResults):
                    out += t.asList()
                elif isinstance(t,list):
                    out += t
                else:
                    out.append(t)
            lastE = e
    # append whatever follows the final match
        out.append(instring[lastE:])
        return "".join(map(_ustr,_flatten(out)))
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            exc = sys.exc_info()[1]
            raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
    """Another extension to C{scanString}, simplifying the access to the tokens found
    to match the given parse expression.  May be called with optional
    C{maxMatches} argument, to clip searching after 'n' matches are found.
    """
    try:
        found = [t for t, s, e in self.scanString(instring, maxMatches)]
        return ParseResults(found)
    except ParseBaseException:
        if ParserElement.verbose_stacktrace:
            raise
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        exc = sys.exc_info()[1]
        raise exc
def __add__(self, other):
    """Implementation of + operator - returns And"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return And([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __radd__(self, other):
    """Implementation of + operator when left operand is not a C{ParserElement}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other + self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __sub__(self, other):
    """Implementation of - operator, returns C{And} with error stop"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        # The _ErrorStop marker makes failures after this point fatal.
        return And([self, And._ErrorStop(), other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rsub__(self, other):
    """Implementation of - operator when left operand is not a C{ParserElement}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + ZeroOrMore(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{ZeroOrMore(expr)}
- C{expr*(1,None)} is equivalent to C{OneOrMore(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
    """Implementation of | operator - returns C{MatchFirst}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __ror__(self, other):
    """Implementation of | operator when left operand is not a C{ParserElement}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __xor__(self, other):
    """Implementation of ^ operator - returns C{Or}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a C{ParserElement}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __and__(self, other):
    """Implementation of & operator - returns C{Each}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other):
    """Implementation of & operator when left operand is not a C{ParserElement}"""
    if isinstance(other, basestring):
        other = Literal(other)
    if isinstance(other, ParserElement):
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
    """Implementation of ~ operator - returns C{NotAny}"""
    # Negative lookahead: succeeds only where this expression does NOT match.
    return NotAny( self )
def __call__(self, name):
    """Shortcut for C{setResultsName}, with C{listAllMatches=default}::
         userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
       could be written as::
         userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
       """
    # Delegates to setResultsName, so (like it) this returns a *copy*.
    return self.setResultsName(name)
def suppress( self ):
    """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
    cluttering up returned output.
    """
    # Wraps self rather than mutating it; the original element is unchanged.
    return Suppress( self )
def leaveWhitespace( self ):
    """Disables the skipping of whitespace before matching the characters in the
    C{ParserElement}'s defined pattern.  This is normally only used internally by
    the pyparsing module, but may be needed in some whitespace-sensitive grammars.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
    """Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
    Must be called before C{parseString} when the input grammar contains elements that
    match <TAB> characters."""
    self.keepTabs = True
    return self
def ignore( self, other ):
    """Define expression to be ignored (e.g., comments) while doing pattern
    matching; may be called repeatedly, to define multiple comment or other
    ignorable patterns.
    """
    if isinstance(other, Suppress):
        # Already suppressed: register it (at most once).
        if other not in self.ignoreExprs:
            self.ignoreExprs.append(other.copy())
    else:
        # Wrap in Suppress so ignored text never appears in the results.
        self.ignoreExprs.append(Suppress(other.copy()))
    return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
    # NOTE(review): relies on self.name having been set (by a subclass or
    # setName); falls through to __getattr__/AttributeError otherwise.
    return self.name
def __repr__( self ):
    # repr and str are deliberately the same for parser elements.
    return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
    # Leaf elements contain no sub-expressions, so there is nothing to check;
    # container subclasses override this to walk their children.
    pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.
    If a filename is specified (instead of a file object),
    the entire file is opened, read, and closed before parsing.
    """
    try:
        # duck-typing: anything with .read() is treated as an open file
        file_contents = file_or_filename.read()
    except AttributeError:
        # Not file-like: treat it as a path.  'with' guarantees the handle is
        # closed even if read() raises (the original leaked it in that case).
        with open(file_or_filename, "rb") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException:
        # catch and re-raise exception from here, clears out pyparsing internal stack trace
        exc = sys.exc_info()[1]
        raise exc
def getException(self):
    # Build a fresh ParseException primed with this element's error message;
    # used (and cached) via the lazy 'myException' attribute in __getattr__.
    return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException();
return ret;
else:
raise AttributeError("no such attribute " + aname)
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
    # Defined in terms of == so the two comparisons always stay consistent.
    return not (self == other)
def __hash__(self):
    # Identity-based hash.  NOTE(review): this is deliberately NOT consistent
    # with __eq__'s structural/string comparison -- it keeps elements usable
    # as dict keys (e.g. the packrat cache); confirm before changing.
    return hash(id(self))
def __req__(self,other):
    # Reflected-equality helper; delegates to __eq__.
    return self == other
def __rne__(self,other):
    # Reflected-inequality helper; delegates to __eq__.
    return not (self == other)
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__( self ):
        # Atomic tokens never save their results as a nested list.
        super(Token, self).__init__(savelist=False)

    def setName(self, name):
        # Re-derive the error message after the base class records the name.
        renamed = super(Token, self).setName(name)
        self.errmsg = "Expected " + self.name
        return renamed
class Empty(Token):
    """An empty token, will always match."""
    def __init__( self ):
        super(Empty, self).__init__()
        self.name = "Empty"
        # Matches zero characters, so it can neither fail nor over-index.
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that will never match."""
    def __init__( self ):
        super(NoMatch, self).__init__()
        self.name = "NoMatch"
        self.mayIndexError = False
        self.mayReturnEmpty = True
        self.errmsg = "Unmatchable token"

    def parseImpl( self, instring, loc, doActions=True ):
        # Always fail: reuse the lazily-built cached exception, refreshed
        # with the current location and input string.
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # Degenerate case: an empty literal is silently downgraded to Empty().
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*; when the first
    # character matches a single-char literal, short-circuit without calling
    # startswith at all.
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] == self.firstMatchChar and \
           (self.matchLen == 1 or instring.startswith(self.match, loc)):
            return loc + self.matchLen, self.match
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
_L = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{Literal}::
      Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
      Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # For caseless matching, compare both sides in upper case.
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = _str2dict(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        end = loc + self.matchLen
        if self.caseless:
            if (instring[loc:end].upper() == self.caselessmatch
                    and (loc >= len(instring) - self.matchLen or instring[end].upper() not in self.identChars)
                    and (loc == 0 or instring[loc - 1].upper() not in self.identChars)):
                return end, self.match
        else:
            # keyword text must match AND be delimited by non-identifier
            # characters (or string boundaries) on both sides
            if (instring[loc] == self.firstMatchChar
                    and (self.matchLen == 1 or instring.startswith(self.match, loc))
                    and (loc >= len(instring) - self.matchLen or instring[end] not in self.identChars)
                    and (loc == 0 or instring[loc - 1] not in self.identChars)):
                return end, self.match
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure

    def copy(self):
        # NOTE: a copy deliberately reverts to the class-wide default ident
        # chars rather than keeping any per-instance customization.
        dup = super(Keyword, self).copy()
        dup.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return dup

    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # Match against the upper-cased text but remember the original
        # spelling, which is what gets returned on a match.
        super(CaselessLiteral, self).__init__(matchString.upper())
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc:loc + self.matchLen].upper() == self.match:
            return loc + self.matchLen, self.returnString
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class CaselessKeyword(Keyword):
    """Caseless version of C{Keyword}; matches the keyword ignoring case,
    returning the token in the case of the defining match string."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # mirror Keyword.parseImpl's caseless branch, including the check that
        # the match is not immediately preceded by an identifier character;
        # that left-boundary test was missing here, so e.g. keyword "abc"
        # incorrectly matched inside "Xabc"
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        # reuse the pre-built exception object; update position/text in place
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            # body characters default to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.asKeyword = asKeyword
        # when no length limits are given and no space chars are involved, build
        # an equivalent regular expression for a faster parseImpl path
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
                # are no longer swallowed.  On any compile failure, fall back to
                # the character-scanning implementation in parseImpl.
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # fast path: use the precompiled regex when one was built in __init__
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # stopped only because of the max limit, but more body chars follow
            throwException = True
        if self.asKeyword:
            # keyword mode: the word must not abut other word characters
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            # was a bare "except:"; fall through to build a default repr
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for readability
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    # type object used to recognize already-compiled regex arguments
    compiledREtype = type(re.compile("[A-Z]"))
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if isinstance(pattern, basestring):
            if len(pattern) == 0:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)
            self.pattern = pattern
            self.flags = flags
            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                # warn with caller context, then let the error propagate
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise
        elif isinstance(pattern, Regex.compiledREtype):
            # accept an already-compiled regex object as-is
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags
        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            # reuse the pre-built exception object; update position/text in place
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # expose named groups as results names
            for k in d:
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed here
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
        Defined with the following parameters:
         - quoteChar - string of one or more characters defining the quote delimiting string
         - escChar - character to escape quotes, typically backslash (default=None)
         - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
         - multiline - boolean indicating whether quotes can span multiple lines (default=False)
         - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
         - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # build a regular expression equivalent to the quoted-string grammar
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow partial prefixes of a multi-char end quote inside the string
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters; replacement is the raw string
                # r"\g<1>" so the backreference reaches re.sub with its
                # backslash intact (was "\g<1>", relying on "\g" not being a
                # recognized string escape)
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed here
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is significant here; do not skip it before matching
        self.skipWhitespace = False
        self.notChars = notChars
        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        #self.myException.msg = self.errmsg
        self.mayIndexError = False
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        notchars = self.notChars
        # consume up to maxLen characters outside the disallowed set
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
                (instring[loc] not in notchars):
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed here
            pass
        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace. Normally, whitespace is ignored
    by pyparsing grammars. This class is included when some whitespace structures
    are significant. Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{Word} class."""
    # human-readable names used to build self.name from the matched set
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # chars matched here must not also be skipped as inter-token whitespace
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        #self.myException.msg = self.errmsg
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT
        if exact > 0:
            # exact overrides both min and max
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            # reuse the pre-built exception object; update position/text in place
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        # consume up to maxLen whitespace characters
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class _PositionToken(Token):
    """Abstract base for tokens that match a parse *position* rather than text."""
    def __init__( self ):
        super(_PositionToken, self).__init__()
        self.name = self.__class__.__name__
        # position tokens consume no input and never index past the string end
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        # target column number (1-based, per the module-level col() helper)
        self.col = colno
    def preParse( self, instring, loc ):
        # skip ignorables and whitespace until the target column is reached
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # already past the target column - cannot back up
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # the skipped-over text is returned as the token
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # "\n" must stay significant here, so remove it from skippable whitespace
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"
        #self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): increments loc, not preloc, when a newline follows the
        # skipped whitespace -- looks suspicious but is preserved as-is;
        # confirm against upstream before changing
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # at line start if: at position 0, or only skippable text precedes us,
        # or the previous character is a newline
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            # reuse the pre-built exception object; update position/text in place
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # "\n" must stay significant here, so remove it from skippable whitespace
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the token
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                # reuse the pre-built exception object; update position/text in place
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # end of string counts as end of line; advances past the end
            return loc+1, []
        else:
            # loc beyond the end of the string
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(_PositionToken):
    """Matches only at the very beginning of the parse string (leading
    whitespace and ignorables are tolerated)."""
    def __init__( self ):
        super(StringStart, self).__init__()
        self.errmsg = "Expected start of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc == 0:
            return loc, []
        # not at 0: still OK if everything before here is skippable
        if loc == self.preParse( instring, 0 ):
            return loc, []
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class StringEnd(_PositionToken):
    """Matches only at the very end of the parse string."""
    def __init__( self ):
        super(StringEnd, self).__init__()
        self.errmsg = "Expected end of text"
        #self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        end = len(instring)
        if loc == end:
            # returns loc+1 (one past the end), preserving original behavior
            return loc+1, []
        if loc > end:
            return loc, []
        # loc < end: there is still unconsumed text
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\\b} behavior of regular
    expressions, use C{WordStart(alphanums)}. C{WordStart} will also match
    at the beginning of the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart, self).__init__()
        self.wordChars = _str2dict(wordChars)
        self.errmsg = "Not at the start of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 always qualifies as a word start
        if loc != 0:
            prev_is_word = instring[loc-1] in self.wordChars
            here_is_word = instring[loc] in self.wordChars
            if prev_is_word or not here_is_word:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of wordChars
    (default=C{printables}). To emulate the C{\\b} behavior of regular
    expressions, use C{WordEnd(alphanums)}. C{WordEnd} will also match
    at the end of the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd, self).__init__()
        self.wordChars = _str2dict(wordChars)
        # whitespace is significant for boundary detection
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"
    def parseImpl(self, instring, loc, doActions=True ):
        total = len(instring)
        # the end of the string always qualifies as a word end
        if total > 0 and loc < total:
            next_is_word = instring[loc] in self.wordChars
            prev_is_word = instring[loc-1] in self.wordChars
            if next_is_word or not prev_is_word:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            # a bare string becomes a single Literal
            self.exprs = [ Literal( exprs ) ]
        else:
            # accept any iterable; fall back to wrapping a single expression
            try:
                self.exprs = list( exprs )
            except TypeError:
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        """Append another expression and invalidate the cached repr."""
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
           all contained expressions."""
        self.skipWhitespace = False
        # copy sub-expressions before mutating, so instances shared elsewhere
        # keep their whitespace behavior
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        if isinstance( other, Suppress ):
            # only propagate a Suppress if it was actually added (not a duplicate)
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed here
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # the mutable default is shared but never mutated (copied below), so safe
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given C{ParseExpressions} to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the '+' operator.
    """
    class _ErrorStop(Empty):
        # marker element: once passed during a parse, later match failures
        # raise the unrecoverable ParseSyntaxException instead of ParseException
        def __init__(self, *args, **kwargs):
            # NOTE(review): calls super(Empty,...), i.e. skips Empty.__init__
            # and runs its base class initializer directly -- preserved as-is
            super(Empty,self).__init__(*args, **kwargs)
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And may return empty only if every contained expression may
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # inherit leading-whitespace behavior from the first expression
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # after the error-stop marker, failures are fatal to the parse
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException:
                    # py2/py3-portable way to get the active exception
                    pe = sys.exc_info()[1]
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # recursion beyond a non-empty expression cannot be left-recursive
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # an Or may return empty if any alternative may
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxMatchLoc = -1
        maxException = None
        # try every alternative; keep the one that matched the most input
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException:
                # py2/py3-portable way to get the active exception;
                # remember the failure that got furthest, for error reporting
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            # nothing matched: re-raise the most advanced failure
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse the winning alternative, this time keeping its results
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if exprs:
            # a MatchFirst may return empty if any alternative may
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        # try alternatives in order; first success wins
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException:
                # retrieve the exception via sys.exc_info() instead of the
                # Python-2-only "except ParseException, err:" syntax, matching
                # the portable idiom already used in Or.parseImpl
                err = sys.exc_info()[1]
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpressions} to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # an Each may return empty only if every contained expression may
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # expression grouping is computed lazily on the first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # partition the contained expressions by repetition/optionality class
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            # NOTE(review): "e not in opt1" compares wrapper elements against
            # the *unwrapped* opt1 entries -- preserved as-is; confirm intent
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt = self.optionals[:]
        matchOrder = []
        # greedily match remaining expressions until a full pass matches nothing
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # re-parse in the discovered order, this time keeping the results
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    # same results name seen more than once: accumulate values
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            # a bare string becomes a Literal
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # adopt the wrapped expression's parsing characteristics
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        if self.expr is not None:
            # copy before mutating so shared sub-expressions keep their behavior;
            # the copy used to happen *before* the None check, which raised
            # AttributeError when self.expr was None
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        if isinstance( other, Suppress ):
            # only propagate a Suppress if it was actually added (not a duplicate)
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # encountering ourselves again means the grammar is left-recursive
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        # the mutable default is shared but never mutated (copied below), so safe
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed here
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead: verifies that the given expression matches at the
    current position, without advancing the parse location. Always returns
    a null token list."""
    def __init__( self, expr ):
        super(FollowedBy, self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed or fail on the lookahead, but never consume input
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Negative lookahead: succeeds only when the wrapped expression does
    *not* match at the current position. Consumes no input, skips no leading
    whitespace, and always returns a null token list. May be constructed
    using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        # deliberately not using self.leaveWhitespace() -- that would also
        # change the whitespace handling of the wrapped expression
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException,IndexError):
            # the inner expression failed to match -- exactly what we want
            return loc, []
        # the inner expression matched: report failure at this position
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        # zero repetitions is a legal match, so this may return empty
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        try:
            # first repetition skips preParse (caller already did it)
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate only non-empty (or named-but-empty) results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # repetition ends at the first failure; keep whatever matched so far
            pass

        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."

        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches should accumulate into a list under the name
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one - let a failure here propagate to the caller
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            while 1:
                if hasIgnoreExprs:
                    preloc = self._skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate only non-empty (or named-but-empty) results
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # repetition ends at the first failure after the mandatory match
            pass

        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."

        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        # repeated matches should accumulate into a list under the name
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
    # Falsy, empty-string placeholder object; its singleton instance serves as
    # the "no default supplied" sentinel for Optional.
    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 spelling of __bool__
    def __str__(self):
        return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    A default return string can also be specified, if the optional expression
    is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        super(Optional,self).__init__( exprs, savelist=False )
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # expression absent: substitute the default (if one was given)
            if self.defaultValue is not _optionalNotMatched:
                if self.expr.resultsName:
                    # preserve the inner expression's results name on the default
                    tokens = ParseResults([ self.defaultValue ])
                    tokens[self.expr.resultsName] = self.defaultValue
                else:
                    tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"

        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    If C{include} is set to true, the matched expression is also parsed (the skipped text
    and matched expression are returned as a 2-element list).  The C{ignore}
    argument is used to define grammars (typically quoted strings and comments) that
    might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # a plain string failOn is promoted to a Literal for convenience
        if failOn is not None and isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
        #self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        failParse = False
        # advance one character at a time until the target expression matches
        while loc <= instrlen:
            try:
                if self.failOn:
                    # abort the whole SkipTo if the failOn expression appears first
                    try:
                        self.failOn.tryParse(instring, loc)
                    except ParseBaseException:
                        pass
                    else:
                        failParse = True
                        raise ParseException(instring, loc, "Found expression " + str(self.failOn))
                    failParse = False
                if self.ignoreExpr is not None:
                    # jump over any number of ignorable regions (comments, strings)
                    while 1:
                        try:
                            loc = self.ignoreExpr.tryParse(instring,loc)
                            # print "found ignoreExpr, advance to", loc
                        except ParseBaseException:
                            break
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                skipText = instring[startLoc:loc]
                if self.includeMatch:
                    # consume the target too, and return [skipped + matched]
                    loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
                    if mat:
                        skipRes = ParseResults( skipText )
                        skipRes += mat
                        return loc, [ skipRes ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ skipText ]
            except (ParseException,IndexError):
                if failParse:
                    # failOn fired - propagate rather than keep scanning
                    raise
                else:
                    loc += 1
        # target never found; reuse the shared exception instance (mutated in place)
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.

    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )

    def __lshift__( self, other ):
        # assign the real expression and copy its parsing characteristics
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        # NOTE(review): mayReturnEmpty is assigned twice (above and here) -
        # appears redundant but harmless; confirm before removing either line
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return None

    def leaveWhitespace( self ):
        self.skipWhitespace = False
        return self

    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self

    def validate( self, validateTrace=[] ):
        # only descend if we have not already been visited on this path
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])

    def __str__( self ):
        if hasattr(self,"name"):
            return self.name

        # temporarily swap in _ForwardNoRecurse so that stringifying a
        # self-referential grammar prints "..." instead of recursing forever
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString

    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # not yet assigned: return a new Forward that resolves through us
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    # temporary stand-in class used by Forward.__str__ to break the
    # stringification cycle in recursive grammars
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseExpression, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        # savelist parameter is accepted but intentionally not forwarded
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.

       DEPRECATED - use the C{upcaseTokens} parse action instead."""
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)

    def postParse( self, instring, loc, tokenlist ):
        """Return the parsed tokens with each one upper-cased."""
        # BUGFIX: string.upper() was removed from the `string` module in
        # Python 3; calling each token's own .upper() method is equivalent
        # on Python 2 and keeps working on Python 3.
        return [ tok.upper() for tok in tokenlist ]
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
    By default, the matching patterns must also be contiguous in the input string;
    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore( self, other ):
        # in adjacent mode, register the ignore expression only on this element
        # (not propagated into the contained expression)
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self

    def postParse( self, instring, loc, tokenlist ):
        # join all matched fragments into a single string token, keeping any
        # named results from the original token list
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)

        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # wrap the whole token list in a single-element list to nest it
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True

    def postParse( self, instring, loc, tokenlist ):
        # treat each sub-token group as a (key, value...) row and install it
        # into the results under its first token
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                continue
            ikey = tok[0]
            if isinstance(ikey,int):
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value -> empty-string value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token or structured value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)

        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard all matched tokens
        return []

    def suppress( self ):
        # already suppressing - nothing more to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once."""
    def __init__(self, methodCall):
        self.callable = ParserElement._normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        # guard clause: a second invocation fails the parse outright
        if self.called:
            raise ParseException(s,l,"")
        # mark as called only after the wrapped action succeeds, so a raising
        # action may be retried
        results = self.callable(s,l,t)
        self.called = True
        return results
    def reset(self):
        # re-arm the wrapper so the action may fire once more
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.

    Wraps a parse action so that entry, normal exit, and exceptional exit
    are all logged to stderr, along with the matched line and tokens.
    """
    f = ParserElement._normalizeParseActionArgs(f)
    def z(*paArgs):
        # BUGFIX: f.func_name is Python 2-only (removed in Python 3);
        # __name__ works on both and is already used below at the bottom
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # bound method: prefix with the owning class for clearer traces
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception:
            exc = sys.exc_info()[1]
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
       By default, the list elements and delimiters can have intervening whitespace, and
       comments, but this can be overridden by passing C{combine=True} in the constructor.
       If C{combine} is set to True, the matching tokens are returned as a single token
       string, with the delimiters included; otherwise, the matching tokens are returned
       as a list of tokens, with the delimiters suppressed.
    """
    dlName = "%s [%s %s]..." % ( _ustr(expr), _ustr(delim), _ustr(expr) )
    if combine:
        # keep the delimiters and glue everything into one string token
        listExpr = Combine( expr + ZeroOrMore( delim + expr ) )
    else:
        # drop the delimiters; only the list elements appear in the results
        listExpr = expr + ZeroOrMore( Suppress( delim ) + expr )
    return listExpr.setName(dlName)
def countedArray( expr ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # rebind the Forward at parse time to exactly n copies of expr
        # (or to empty when the count is zero)
        n = int(t[0])
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []
    return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # at parse time, rebind the Forward to literally match what was just parsed
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # previous expression matched nothing: the repeat matches empty too
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        # capture what the first occurrence matched ...
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            # ... and reject the repeat unless it yields identical tokens
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep
def _escapeRegexRangeChars(s):
    # Backslash-escape the characters that are special inside a regex
    # character class (\ ^ - ]), and spell newline/tab as \n and \t.
    escaped = s
    for special in "\\^-]":
        escaped = escaped.replace(special, _bslash + special)
    escaped = escaped.replace("\n", r"\n")
    escaped = escaped.replace("\t", r"\t")
    return _ustr(escaped)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
       longest-first testing when there is a conflict, regardless of the input order,
       but returns a C{MatchFirst} for best performance.

       Parameters:
        - strs - a string of space-delimited literals, or a list of string literals
        - caseless - (default=False) - treat all literals as caseless
        - useRegex - (default=True) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    if isinstance(strs,(list,tuple)):
        symbols = list(strs[:])
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # BUGFIX: previously only warned and fell through with 'symbols'
        # unbound, crashing below with a confusing NameError; treat an
        # invalid argument as an empty set of alternatives instead
        symbols = []

    # deduplicate, and reorder so a longer literal is tried before any of
    # its own prefixes (longest-first semantics for MatchFirst)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            if len(symbols)==len("".join(symbols)):
                # every alternative is a single character - a class suffices
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except Exception:
            # was a bare "except:"; narrowed so Ctrl-C is not swallowed
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)

    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value.  Takes care of defining the C{Dict}, C{ZeroOrMore}, and C{Group} tokens
    in the proper order.  The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text.  The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.
    """
    # each Group becomes one (key, value) row; Dict indexes rows by their first token
    return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression.  Useful to
       restore the parsed fields of an HTML start tag into the raw tag text itself, or to
       revert separate tokens with intervening whitespace back to the original matching
       input text. Simpler to use than the parse action C{keepOriginalText}, and does not
       require the inspect module to chase up the call stack.  By default, returns a
       string containing the original parsed text.

       If the optional C{asString} argument is passed as False, then the return value is a
       C{ParseResults} containing any results names that were originally matched, and a
       single token containing the original matched text from the input string.  So if
       the expression passed to C{originalTextFor} contains expressions with defined
       results names, you must set C{asString} to False if you want to preserve those
       results name values."""
    # bracket the expression with zero-width markers that record the start
    # and end character offsets of the match
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # replace the token list with the raw slice, preserving names
            del t[:]
            t.insert(0, s[t._original_start:t._original_end])
            del t["_original_start"]
            del t["_original_end"]
    matchExpr.setParseAction(extractText)
    return matchExpr
# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

# building blocks for srange(): escaped punctuation, \0x.. hex and \0.. octal
# character escapes, single characters, and a-b ranges inside [...]
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in  r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"

# expand a (start, end) range pair into the full run of characters
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction.  Borrows
    syntax from regexp '[]' string range definitions::
       srange("[0-9]")   -> "0123456789"
       srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
       srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
       a single character
       an escaped character with a leading backslash (such as \- or \])
       an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
       an escaped octal character with a leading '\0' (\041, which is a '!' character)
       a range of any of the above, separated by a dash ('a-z', etc.)
       any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; any malformed range still yields ""
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at a specific
       column in the input text.
    """
    def verifyCol(strg,locn,toks):
        # col() reports the 1-based column of locn within strg
        actualCol = col(locn,strg)
        if actualCol != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verifyCol
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value.  Especially
       useful when used with C{transformString()}.
    """
    def _substitute(*args):
        # discard whatever (s, loc, toks) arguments arrive; always produce a
        # fresh single-token list holding the fixed replacement value
        return [ replStr ]
    return _substitute
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
       To use, add this parse action to quoted string using::
         quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    # strip exactly one leading and one trailing quote character
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    # coerce each token to text first, then upper-case it
    return [ _ustr(tt).upper() for tt in t ]
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    # coerce each token to text first, then lower-case it
    return [ _ustr(tt).lower() for tt in t ]
def keepOriginalText(s,startLoc,t):
    """DEPRECATED - use new helper method C{originalTextFor}.
       Helper parse action to preserve original parsed text,
       overriding any nested parse actions."""
    try:
        # locate the end offset of the match by inspecting the call stack
        endloc = getTokensEndLoc()
    except ParseException:
        raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
    # replace the token list with the raw input slice
    del t[:]
    t += ParseResults(s[startLoc:endloc])
    return t
def getTokensEndLoc():
    """Method to be called from within a parse action to determine the end
       location of the parsed tokens."""
    import inspect
    fstack = inspect.stack()
    try:
        # search up the stack (through intervening argument normalizers) for correct calling routine
        for f in fstack[2:]:
            if f[3] == "_parseNoCache":
                # grab the parser's current location from its local variables
                endloc = f[0].f_locals["loc"]
                return endloc
        else:
            # for/else: loop exhausted without finding _parseNoCache, so this
            # was not invoked from inside a parse action
            raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
    finally:
        # break reference cycles created by holding frame objects
        del fstack
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr,basestring):
        resname = tagStr
        # HTML tag names match caselessly; XML tag names are case-sensitive
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare, attr names are lower-cased,
        # and the '=value' part is optional
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")

    # expose the pair under names like startDiv / endDiv for results access
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)

    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
    # xml=False: caseless tag names, optional/bare attribute values allowed
    return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
    # xml=True: case-sensitive tag names, double-quoted attribute values required
    return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
       with makeXMLTags or makeHTMLTags.  Use withAttribute to qualify a starting tag
       with a required attribute value, to avoid false matches on common tags such as
       <TD> or <DIV>.

       Call withAttribute with a series of attribute names and values. Specify the list
       of filter attributes names and values as:
        - keyword arguments, as in (class="Customer",align="right"), or
        - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
       For attribute names with a namespace prefix, you must use the second form.  Attribute
       names are matched insensitive to upper/lower case.

       To verify that the attribute exists, but without specifying a value, pass
       withAttribute.ANY_VALUE as the value.
       """
    if args:
        attrs = args[:]
    else:
        attrs = attrDict.items()
    # normalize either input form to a list of (name, value) pairs
    attrs = [(k,v) for k,v in attrs]
    def pa(s,l,tokens):
        for attrName,attrValue in attrs:
            if attrName not in tokens:
                raise ParseException(s,l,"no matching attribute " + attrName)
            # ANY_VALUE sentinel means "attribute must exist, any value is fine"
            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                            (attrName, tokens[attrName], attrValue))
    return pa
withAttribute.ANY_VALUE = object()  # sentinel: attribute must exist, value unchecked

# operator-associativity constants for operatorPrecedence
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
       operators working in a precedence hierarchy.  Operators may be unary or
       binary, left- or right-associative.  Parse actions can also be attached
       to operator expressions.

       Parameters:
        - baseExpr - expression representing the most basic element for the nested
        - opList - list of tuples, one for each operator precedence level in the
          expression grammar; each tuple is of the form
          (opExpr, numTerms, rightLeftAssoc, parseAction), where:
           - opExpr is the pyparsing expression for the operator;
              may also be a string, which will be converted to a Literal;
              if numTerms is 3, opExpr is a tuple of two expressions, for the
              two operators separating the 3 terms
           - numTerms is the number of terms for this operator (must
              be 1, 2, or 3)
           - rightLeftAssoc is the indicator whether the operator is
              right or left associative, using the pyparsing-defined
              constants opAssoc.RIGHT and opAssoc.LEFT.
           - parseAction is the parse action to be associated with
              expressions matching this operator expression (the
              parse action tuple member may be omitted)
    """
    ret = Forward()
    # the innermost level: the base expression, or any parenthesized expression
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    # each pass wraps the previous level, so earlier opList entries bind tighter
    for i,operDef in enumerate(opList):
        # pad with None so a 3-tuple (no parse action) unpacks cleanly
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward()#.setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # postfix unary: operand followed by one or more operators
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # None opExpr = juxtaposition (implicit operator)
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    # right-assoc recurses on thisExpr (same level) to the right
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# quoted-string expressions: allow doubled-quote and backslash escapes,
# including \xHH hex escapes, but no unescaped line breaks inside the string
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
       delimiters ("(" and ")" are the default).

       Parameters:
        - opener - opening character for a nested list (default="("); can also be a pyparsing expression
        - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
        - content - expression for items within the nested lists (default=None)
        - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)

       If an expression is not provided for the content argument, the nested
       expression will capture all whitespace-delimited content between delimiters
       as a list of separate values.

       Use the ignoreExpr argument to define expressions that may contain
       opening or closing characters that should not be treated as opening
       or closing characters for nesting, such as quotedString or a comment
       expression.  Specify multiple expressions using an Or or MatchFirst.
       The default is quotedString, but if no expressions are to be ignored,
       then pass None for this argument.
    """
    # NOTE(review): the ignoreExpr default is evaluated once at import time,
    # so all callers relying on the default share one expression object -
    # presumably intentional since it is never mutated; confirm before changing
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                # single-char delimiters: exclude them (and whitespace) from content
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                # multi-char delimiters: use negative lookahead per character
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # recursive structure: a group may contain nested groups of the same shape
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
       those used to define block statements in Python source code.

       Parameters:
        - blockStatementExpr - expression defining syntax of statement that
            is repeated within the indented block
        - indentStack - list created by caller to manage indentation stack
            (multiple statementWithIndentedBlock expressions within a single grammar
            should share a common indentStack)
        - indent - boolean indicating whether block must be indented beyond the
            the current level; set to False for block of left-most statements
            (default=True)

       A valid block must contain at least one blockStatement.
    """
    def checkPeerIndent(s,l,t):
        # statements in a block must all start at the same column
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")

    def checkSubIndent(s,l,t):
        # entering a block: column must be deeper than the enclosing level
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")

    def checkUnindent(s,l,t):
        # leaving a block: column must return to some enclosing level
        if l >= len(s): return
        curCol = col(l,s)
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()

    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = Empty() + Empty().setParseAction(checkSubIndent)
    PEER   = Empty().setParseAction(checkPeerIndent)
    UNDENT = Empty().setParseAction(checkUnindent)
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # allow backslash-continued lines inside the block
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr
# Latin-1 letter and punctuation ranges (outside 7-bit ASCII).
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# Matchers for any HTML open/close tag pair.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
# Matches the common named HTML entities (&gt; &lt; &amp; &nbsp; &quot;),
# capturing the bare name under the 'entity' results name.
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
# Maps entity name -> replacement character: gt:'>' lt:'<' amp:'&' nbsp:' ' quot:'"'
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
# Parse-action helper: replace a matched entity token with its character.
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
# Matches either a /* ... */ block or a // line comment (with escaped newlines).
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# All printable characters except the comma itself.
_noncomma = "".join( [ c for c in printables if c != "," ] )
# One unquoted item of a comma-separated list (may contain internal spaces/tabs).
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (teststring + "->" + str(tokenlist))
print ("tokens = " + str(tokens))
print ("tokens.columns = " + str(tokens.columns))
print ("tokens.tables = " + str(tokens.tables))
print (tokens.asXML("SQL",True))
except ParseBaseException:
err = sys.exc_info()[1]
print (teststring + "->")
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| gpl-3.0 |
carlohamalainen/nipype | nipype/interfaces/afni/tests/test_auto_Merge.py | 5 | 1228 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import Merge
def test_Merge_inputs():
    """Yield nose-style checks that every input trait of afni Merge carries
    the expected metadata (argstr, mandatory, position, ...).
    NOTE: this file is auto-generated by tools/checkspecs.py - do not edit."""
    input_map = dict(args=dict(argstr='%s',
    ),
    blurfwhm=dict(argstr='-1blur_fwhm %d',
    units='mm',
    ),
    doall=dict(argstr='-doall',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_files=dict(argstr='%s',
    copyfile=False,
    mandatory=True,
    position=-1,
    ),
    out_file=dict(argstr='-prefix %s',
    name_source='in_file',
    name_template='%s_merge',
    ),
    outputtype=dict(),
    terminal_output=dict(mandatory=True,
    nohash=True,
    ),
    )
    inputs = Merge.input_spec()
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Merge_outputs():
    """Yield nose-style checks for the output trait metadata of afni Merge.
    NOTE: this file is auto-generated by tools/checkspecs.py - do not edit."""
    output_map = dict(out_file=dict(),
    )
    outputs = Merge.output_spec()
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
darmaa/odoo | addons/hr_evaluation/report/__init__.py | 441 | 1076 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_evaluation_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CSyllabus/webapp | backend/apps/csyllabusapi/views/tests/test_country.py | 1 | 1963 | from django.test import TestCase, Client
from ...models import Country
from django.core.exceptions import ObjectDoesNotExist
import json
from django.utils import timezone
class CountryViewTestCase(TestCase):
    """Exercises POST/DELETE/PUT against the /csyllabusapi/country endpoint."""

    def test_post(self):
        # Seed one row so we can predict the id the view will assign next.
        existing = Country.objects.create(name="Italy")
        client = Client()
        new_id = existing.id + 1
        payload = {
            'modified': str(timezone.now()),
            'created': str(timezone.now()),
            'id': new_id,
            'img': None,
            'name': 'Sweden',
        }
        response = client.post('/csyllabusapi/country', json.dumps(payload), 'application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Country.objects.get(id=new_id).name, "Sweden")

    def test_delete(self):
        existing = Country.objects.create(name="Italy", img="")
        client = Client()
        payload = {
            'modified': str(existing.modified),
            'created': str(existing.created),
            'id': existing.id,
            'img': existing.img,
            'name': existing.name,
        }
        response = client.delete('/csyllabusapi/country', json.dumps(payload), 'application/json')
        # NOTE(review): this looks up id + 1 rather than the deleted row's own
        # id, matching the original test -- confirm that is intentional.
        try:
            leftover = Country.objects.get(id=existing.id + 1).name
        except ObjectDoesNotExist:
            leftover = None
        self.assertEqual(response.status_code, 200)
        self.assertEqual(leftover, None)

    def test_put(self):
        existing = Country.objects.create(name="Croaty")
        client = Client()
        payload = {
            'id': existing.id,
            'name': 'Croatia',
        }
        response = client.put('/csyllabusapi/country', json.dumps(payload), 'application/json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Country.objects.get(id=existing.id).name, "Croatia")
imply/chuu | ppapi/generators/idl_release.py | 63 | 9668 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
IDLRelease for PPAPI
This file defines the behavior of the AST namespace which allows for resolving
a symbol as one or more AST nodes given a Release or range of Releases.
"""
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_option import GetOption, Option, ParseOptions
Option('release_debug', 'Debug Release data')
Option('wgap', 'Ignore Release gap warning')
#
# Module level functions and data used for testing.
#
# Module-level capture slots for the most recent reported error/warning.
# The self-test functions in this module read these to verify that
# validation fired (or did not).
error = None
warning = None
def ReportReleaseError(msg):
  """Record |msg| as the most recent release error (test hook)."""
  global error
  error = msg
def ReportReleaseWarning(msg):
  """Record |msg| as the most recent release warning (test hook)."""
  global warning
  warning = msg
def ReportClear():
  """Reset the captured error and warning state."""
  global error, warning
  error = None
  warning = None
#
# IDLRelease
#
# IDLRelease is an object which stores the association of a given symbol
# name, with an AST node for a range of Releases for that object.
#
# A vmin value of None indicates that the object begins at the earliest
# available Release number. The value of vmin is always inclusive.
# A vmax value of None indicates that the object is never deprecated, so
# it exists until it is overloaded or until the latest available Release.
# The value of vmax is always exclusive, representing the first Release
# on which the object is no longer valid.
class IDLRelease(object):
  """Associates a symbol with a half-open range of releases [rmin, rmax).

  rmin is inclusive; None means the object exists since the earliest release.
  rmax is exclusive; None means the object is never deprecated.  Bounds are
  compared with < and <=, so they must be mutually comparable values (the
  self-tests in this module use strings such as 'M14').
  """
  def __init__(self, rmin, rmax):
    self.rmin = rmin
    self.rmax = rmax
  def __str__(self):
    # Render the half-open interval, substituting '0' / '+oo' for open ends.
    if not self.rmin:
      rmin = '0'
    else:
      rmin = str(self.rmin)
    if not self.rmax:
      rmax = '+oo'
    else:
      rmax = str(self.rmax)
    return '[%s,%s)' % (rmin, rmax)
  def SetReleaseRange(self, rmin, rmax):
    # Replace both bounds at once.
    self.rmin = rmin
    self.rmax = rmax
  # True, if release falls within the interval [self.rmin, self.rmax)
  def IsRelease(self, release):
    if self.rmax and self.rmax <= release:
      return False
    if self.rmin and self.rmin > release:
      return False
    if GetOption('release_debug'):
      InfoOut.Log('%f is in %s' % (release, self))
    return True
  # True, if any of |releases| falls within [self.rmin, self.rmax)
  def InReleases(self, releases):
    if not releases: return False
    # Check last release first, since InRange does not match last item
    if self.IsRelease(releases[-1]): return True
    if len(releases) > 1:
      return self.InRange(releases[0], releases[-1])
    return False
  # True, if interval [rmin, rmax) overlaps interval [self.rmin, self.rmax)
  def InRange(self, rmin, rmax):
    assert (rmin == None) or rmin < rmax
    # An min of None always passes a min bound test
    # An max of None always passes a max bound test
    if rmin is not None and self.rmax is not None:
      if self.rmax <= rmin:
        return False
    if rmax is not None and self.rmin is not None:
      if self.rmin >= rmax:
        return False
    if GetOption('release_debug'):
      InfoOut.Log('%f to %f is in %s' % (rmin, rmax, self))
    return True
  def GetMinMax(self, releases = None):
    # With no |releases| context, return the raw bounds (possibly None).
    if not releases:
      return self.rmin, self.rmax
    # Otherwise substitute the first/last known release for open ends.
    # NOTE(review): set bounds are stringified via str() while the substituted
    # defaults are returned as-is -- confirm callers expect this mix.
    if not self.rmin:
      rmin = releases[0]
    else:
      rmin = str(self.rmin)
    if not self.rmax:
      rmax = releases[-1]
    else:
      rmax = str(self.rmax)
    return (rmin, rmax)
  def SetMin(self, release):
    # Only valid while no minimum has been set yet.
    assert not self.rmin
    self.rmin = release
  def Error(self, msg):
    # Route through the module-level hook so tests can capture errors.
    ReportReleaseError(msg)
  def Warn(self, msg):
    # Route through the module-level hook so tests can capture warnings.
    ReportReleaseWarning(msg)
#
# IDLReleaseList
#
# IDLReleaseList is a list based container for holding IDLRelease
# objects in order. The IDLReleaseList can be added to, and searched by
# range. Objects are stored in order, and must be added in order.
#
class IDLReleaseList(object):
  """Ordered container of IDLRelease objects for one symbol.

  Nodes must be added oldest-first; AddNode enforces that consecutive release
  ranges do not overlap and closes the previous node's open-ended range.
  """
  def __init__(self):
    self._nodes = []
  def GetReleases(self):
    return self._nodes
  def FindRelease(self, release):
    # Return the node whose range contains |release|, or None.
    for node in self._nodes:
      if node.IsRelease(release):
        return node
    return None
  def FindRange(self, rmin, rmax):
    # Return all nodes whose ranges overlap [rmin, rmax).
    assert (rmin == None) or rmin != rmax
    out = []
    for node in self._nodes:
      if node.InRange(rmin, rmax):
        out.append(node)
    return out
  def AddNode(self, node):
    """Append |node| as the newest release of this symbol.

    Returns True on success; on failure reports via node.Error() and
    returns False.  May mutate the previous node (closing its open rmax)
    and may mutate |node| itself indirectly via its Error/Warn hooks.
    """
    if GetOption('release_debug'):
      InfoOut.Log('\nAdding %s %s' % (node.Location(), node))
    last = None
    # Check current releases in that namespace
    for cver in self._nodes:
      if GetOption('release_debug'): InfoOut.Log(' Checking %s' % cver)
      # We should only be missing a 'release' tag for the first item.
      if not node.rmin:
        node.Error('Missing release on overload of previous %s.' %
                   cver.Location())
        return False
      # If the node has no max, then set it to this one
      if not cver.rmax:
        cver.rmax = node.rmin
        if GetOption('release_debug'): InfoOut.Log(' Update %s' % cver)
      # if the max and min overlap, that's an error
      if cver.rmax > node.rmin:
        if node.rmax and cver.rmin >= node.rmax:
          node.Error('Declarations out of order.')
        else:
          node.Error('Overlap in releases: %s vs %s when adding %s' %
                     (cver.rmax, node.rmin, node))
        return False
      last = cver
    # Otherwise, the previous max and current min should match
    # unless this is the unlikely case of something being only
    # temporarily deprecated.
    if last and last.rmax != node.rmin:
      node.Warn('Gap in release numbers.')
    # If we made it here, this new node must be the 'newest'
    # and does not overlap with anything previously added, so
    # we can add it to the end of the list.
    if GetOption('release_debug'): InfoOut.Log('Done %s' % node)
    self._nodes.append(node)
    return True
#
# IDLReleaseMap
#
# A release map, can map from an float interface release, to a global
# release string.
#
class IDLReleaseMap(object):
  """Bidirectional map between release names and interface version numbers.

  Built from an iterable of (release, version) pairs; provides lookups in
  both directions plus sorted listings and range queries.
  """
  def __init__(self, release_info):
    # release_info: iterable of (release, version) pairs.
    self.version_to_release = {}
    self.release_to_version = {}
    for release, version in release_info:
      self.version_to_release[version] = release
      self.release_to_version[release] = version
    # Sorted copies for ordered iteration and range queries.
    self.releases = sorted(self.release_to_version.keys())
    self.versions = sorted(self.version_to_release.keys())
  def GetVersion(self, release):
    """Return the version for |release|, or None if unknown."""
    return self.release_to_version.get(release, None)
  def GetVersions(self):
    """Return all known versions, sorted ascending."""
    return self.versions
  def GetRelease(self, version):
    """Return the release for |version|, or None if unknown."""
    return self.version_to_release.get(version, None)
  def GetReleases(self):
    """Return all known releases, sorted ascending."""
    return self.releases
  def GetReleaseRange(self):
    """Return (lowest release, highest release)."""
    return (self.releases[0], self.releases[-1])
  def GetVersionRange(self):
    """Return (lowest version, highest version).

    Fixed: previously read self.version[-1] (a nonexistent attribute),
    which raised AttributeError whenever this method was called.
    """
    return (self.versions[0], self.versions[-1])
#
# Test Code
#
def TestReleaseNode():
FooXX = IDLRelease(None, None)
Foo1X = IDLRelease('M14', None)
Foo23 = IDLRelease('M15', 'M16')
assert FooXX.IsRelease('M13')
assert FooXX.IsRelease('M14')
assert FooXX.InRange('M13', 'M13A')
assert FooXX.InRange('M14','M15')
assert not Foo1X.IsRelease('M13')
assert Foo1X.IsRelease('M14')
assert Foo1X.IsRelease('M15')
assert not Foo1X.InRange('M13', 'M14')
assert not Foo1X.InRange('M13A', 'M14')
assert Foo1X.InRange('M14', 'M15')
assert Foo1X.InRange('M15', 'M16')
assert not Foo23.InRange('M13', 'M14')
assert not Foo23.InRange('M13A', 'M14')
assert not Foo23.InRange('M14', 'M15')
assert Foo23.InRange('M15', 'M16')
assert Foo23.InRange('M14', 'M15A')
assert Foo23.InRange('M15B', 'M17')
assert not Foo23.InRange('M16', 'M17')
print "TestReleaseNode - Passed"
def TestReleaseListWarning():
FooXX = IDLRelease(None, None)
Foo1X = IDLRelease('M14', None)
Foo23 = IDLRelease('M15', 'M16')
Foo45 = IDLRelease('M17', 'M18')
# Add nodes out of order should fail
ReportClear()
releases = IDLReleaseList()
assert releases.AddNode(Foo23)
assert releases.AddNode(Foo45)
assert warning
print "TestReleaseListWarning - Passed"
def TestReleaseListError():
FooXX = IDLRelease(None, None)
Foo1X = IDLRelease('M14', None)
Foo23 = IDLRelease('M15', 'M16')
Foo45 = IDLRelease('M17', 'M18')
# Add nodes out of order should fail
ReportClear()
releases = IDLReleaseList()
assert releases.AddNode(FooXX)
assert releases.AddNode(Foo23)
assert not releases.AddNode(Foo1X)
assert error
print "TestReleaseListError - Passed"
def TestReleaseListOK():
FooXX = IDLRelease(None, None)
Foo1X = IDLRelease('M14', None)
Foo23 = IDLRelease('M15', 'M16')
Foo45 = IDLRelease('M17', 'M18')
# Add nodes in order should work
ReportClear()
releases = IDLReleaseList()
assert releases.AddNode(FooXX)
assert releases.AddNode(Foo1X)
assert releases.AddNode(Foo23)
assert not error and not warning
assert releases.AddNode(Foo45)
assert warning
assert releases.FindRelease('M13') == FooXX
assert releases.FindRelease('M14') == Foo1X
assert releases.FindRelease('M15') == Foo23
assert releases.FindRelease('M16') == None
assert releases.FindRelease('M17') == Foo45
assert releases.FindRelease('M18') == None
assert releases.FindRange('M13','M14') == [FooXX]
assert releases.FindRange('M13','M17') == [FooXX, Foo1X, Foo23]
assert releases.FindRange('M16','M17') == []
assert releases.FindRange(None, None) == [FooXX, Foo1X, Foo23, Foo45]
# Verify we can find the correct versions
print "TestReleaseListOK - Passed"
def TestReleaseMap():
print "TestReleaseMap- Passed"
def Main(args):
TestReleaseNode()
TestReleaseListWarning()
TestReleaseListError()
TestReleaseListOK()
print "Passed"
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
heke123/chromium-crosswalk | chrome/common/extensions/docs/server2/fake_fetchers.py | 36 | 4590 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These are fake fetchers that are used for testing and the preview server.
# They return canned responses for URLs. url_fetcher_fake.py uses the fake
# fetchers if other URL fetching APIs are unavailable.
import base64
import json
import os
import re
import url_fetcher_fake
from extensions_paths import SERVER2
from path_util import IsDirectory
from test_util import ReadFile, ChromiumPath
import url_constants
# TODO(kalman): Investigate why logging in this class implies that the server
# isn't properly caching some fetched files; often it fetches the same file
# 10+ times. This may be a test anomaly.
def _ReadTestData(*path, **read_args):
return ReadFile(SERVER2, 'test_data', *path, **read_args)
class _FakeFetcher(object):
def _ListDir(self, path):
return os.listdir(path)
def _IsDir(self, path):
return os.path.isdir(path)
def _Stat(self, path):
return int(os.stat(path).st_mtime)
class _FakeOmahaProxy(_FakeFetcher):
def fetch(self, url):
return _ReadTestData('branch_utility', 'first.json')
class _FakeOmahaHistory(_FakeFetcher):
def fetch(self, url):
return _ReadTestData('branch_utility', 'second.json')
_SVN_URL_TO_PATH_PATTERN = re.compile(
r'^.*chrome/.*(trunk|branches/.*)/src/?([^?]*).*?')
def _ExtractPathFromSvnUrl(url):
return _SVN_URL_TO_PATH_PATTERN.match(url).group(2)
class _FakeSubversionServer(_FakeFetcher):
def fetch(self, url):
path = _ExtractPathFromSvnUrl(url)
if IsDirectory(path):
html = ['<html>Revision 000000']
try:
for f in self._ListDir(ChromiumPath(path)):
if f.startswith('.'):
continue
if self._IsDir(ChromiumPath(path, f)):
html.append('<a>' + f + '/</a>')
else:
html.append('<a>' + f + '</a>')
html.append('</html>')
return '\n'.join(html)
except OSError as e:
return None
try:
return ReadFile(path)
except IOError:
return None
class _FakeViewvcServer(_FakeFetcher):
def fetch(self, url):
path = ChromiumPath(_ExtractPathFromSvnUrl(url))
if self._IsDir(path):
html = ['<table><tbody><tr>...</tr>']
# The version of the directory.
dir_stat = self._Stat(path)
html.append('<tr>')
html.append('<td>Directory revision:</td>')
html.append('<td><a>%s</a><a></a></td>' % dir_stat)
html.append('</tr>')
# The version of each file.
for f in self._ListDir(path):
if f.startswith('.'):
continue
html.append('<tr>')
html.append(' <td><a>%s%s</a></td>' % (
f, '/' if self._IsDir(os.path.join(path, f)) else ''))
html.append(' <td><a><strong>%s</strong></a></td>' %
self._Stat(os.path.join(path, f)))
html.append('<td></td><td></td><td></td>')
html.append('</tr>')
html.append('</tbody></table>')
return '\n'.join(html)
try:
return ReadFile(path)
except IOError:
return None
class _FakeGithubStat(_FakeFetcher):
def fetch(self, url):
return '{ "sha": 0 }'
class _FakeGithubZip(_FakeFetcher):
def fetch(self, url):
return _ReadTestData('github_file_system', 'apps_samples.zip', mode='rb')
class _FakeRietveldAPI(_FakeFetcher):
def __init__(self):
self._base_pattern = re.compile(r'.*/(api/.*)')
def fetch(self, url):
return _ReadTestData(
'rietveld_patcher', self._base_pattern.match(url).group(1), 'json')
class _FakeRietveldTarball(_FakeFetcher):
def __init__(self):
self._base_pattern = re.compile(r'.*/(tarball/\d+/\d+)')
def fetch(self, url):
return _ReadTestData(
'rietveld_patcher', self._base_pattern.match(url).group(1) + '.tar.bz2',
mode='rb')
def ConfigureFakeFetchers():
'''Configure the fake fetcher paths relative to the docs directory.
'''
url_fetcher_fake.ConfigureFakeUrlFetch({
url_constants.OMAHA_HISTORY: _FakeOmahaHistory(),
url_constants.OMAHA_PROXY_URL: _FakeOmahaProxy(),
'%s/.*' % url_constants.SVN_URL: _FakeSubversionServer(),
'%s/.*' % url_constants.VIEWVC_URL: _FakeViewvcServer(),
'%s/.*/commits/.*' % url_constants.GITHUB_REPOS: _FakeGithubStat(),
'%s/.*/zipball' % url_constants.GITHUB_REPOS: _FakeGithubZip(),
'%s/api/.*' % url_constants.CODEREVIEW_SERVER: _FakeRietveldAPI(),
'%s/tarball/.*' % url_constants.CODEREVIEW_SERVER: _FakeRietveldTarball(),
})
| bsd-3-clause |
zach-/roll20_britannia | image_processing/image_knit.py | 1 | 1802 | import json
from PIL import Image, ImageTk
from cv2 import imread, imwrite
from numpy import zeros, unique
from tkinter import Toplevel, Label
from glob import glob
import os
from re import search
def change_values(data):
    """Translate every cell in-place so the first cell sits at the origin."""
    origin_x, origin_y = data[0]["X"], data[0]["Y"]
    for cell in data:
        cell["X"] -= origin_x
        cell["Y"] -= origin_y
def main():
    """Stitch per-tile JPEGs into one large map image per world JSON file.

    Reads each ../map_json/World*.txt file (a JSON list of cells with ID/X/Y
    keys), normalizes the coordinates so the first cell is at the origin,
    pastes each tile at 31-pixel spacing onto a blank canvas, and writes the
    result to ../map_images/dungeon_file_<n>.jpg.

    Fixes: the original body used Python 2 constructs (`xrange`, the `print`
    statement) even though the file imports `tkinter` (a Python 3 name), so
    it could not run under either interpreter; the "wolrd" typo in the log
    message is also corrected.
    """
    file_list = glob('../map_json/World*.txt')
    tile_prefix = '../tileset/tile_'  # avoid shadowing builtins `dir`/`file`
    ext = '.jpg'
    coorx = 9920    # output canvas width in pixels
    coory = 15872   # output canvas height in pixels
    for file_path in file_list:
        num = search(r'\d+', file_path)
        print('Started processing world file ' + str(num.group(0)))
        new_img = Image.new('RGB', (coorx, coory))
        with open(file_path) as json_data:
            data = json.load(json_data)
            change_values(data)
            for cell in data:
                try:
                    img = Image.open(tile_prefix + str(cell["ID"]) + ext)
                    # Tiles are laid out on a 31-pixel grid.
                    new_img.paste(img, (cell["X"] * 31,
                                        cell["Y"] * 31))
                except IOError as err:
                    # Missing tile images are skipped but reported.
                    print(err)
        filename = '../map_images/dungeon_file_' + str(num.group(0)) + '.jpg'
        new_img.save(filename)
        print('Finished processing world file ' + str(num.group(0)))
# for x in xrange(len(data)):
# tupl = (data[x]["ID"])
# tuple_list.append(tupl)
#
# for y in xrange(0, coory, 31):
# for x in xrange(0, coorx, 31):
#
# new_img.paste(img, (x, y))
#
# new_img.show()
# print tuple_list
# test()
# json format: {"CoordX":544,"CoordY":0,"Name":"water","ID":170,"CoordZ":-5}
if __name__ == '__main__':
main()
| mit |
nesterione/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This allows to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
pdellaert/ansible | test/lib/ansible_test/_internal/sanity/yamllint.py | 14 | 3420 | """Sanity test using yamllint."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
from .. import types as t
from ..sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SANITY_ROOT,
)
from ..target import (
TestTarget,
)
from ..util import (
SubprocessError,
display,
is_subdir,
find_python,
)
from ..util_common import (
run_command,
)
from ..config import (
SanityConfig,
)
from ..data import (
data_context,
)
class YamllintTest(SanitySingleVersion):
    """Sanity test using yamllint."""
    @property
    def error_code(self): # type: () -> t.Optional[str]
        """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
        return 'ansible-test'
    def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
        """Return the given list of test targets, filtered to include only those relevant for the test."""
        # All .yml/.yaml files are always candidates.
        yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
        for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
            # module_utils are plain Python libraries, not plugins -- skipped.
            if plugin_type == 'module_utils':
                continue
            # Python plugin files (excluding package __init__.py files) are also
            # passed to yamllint -- presumably for the YAML documentation blocks
            # they embed; confirm against yamllinter.py.
            yaml_targets.extend([target for target in targets if
                                 os.path.splitext(target.path)[1] == '.py' and
                                 os.path.basename(target.path) != '__init__.py' and
                                 is_subdir(target.path, plugin_path)])
        return yaml_targets
    def test(self, args, targets, python_version):
        """Run yamllint over the included targets and return a TestResult.

        :type args: SanityConfig
        :type targets: SanityTargets
        :type python_version: str
        :rtype: TestResult
        """
        settings = self.load_processor(args)
        paths = [target.path for target in targets.include]
        python = find_python(python_version)
        results = self.test_paths(args, paths, python)
        # Apply ignore/skip processing configured for this sanity test.
        results = settings.process_errors(results, paths)
        if results:
            return SanityFailure(self.name, messages=results)
        return SanitySuccess(self.name)
    @staticmethod
    def test_paths(args, paths, python):
        """Invoke the yamllinter.py helper on |paths| and parse its findings.

        :type args: SanityConfig
        :type paths: list[str]
        :type python: str
        :rtype: list[SanityMessage]
        """
        cmd = [
            python,
            os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
        ]
        # Paths are passed on stdin, one per line.
        data = '\n'.join(paths)
        display.info(data, verbosity=4)
        try:
            stdout, stderr = run_command(args, cmd, data=data, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status
        # Any stderr output is unexpected; surface it as a hard failure.
        if stderr:
            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        if args.explain:
            return []
        # yamllinter.py emits its findings as JSON on stdout.
        results = json.loads(stdout)['messages']
        results = [SanityMessage(
            code=r['code'],
            message=r['message'],
            path=r['path'],
            line=int(r['line']),
            column=int(r['column']),
            level=r['level'],
        ) for r in results]
        return results
| gpl-3.0 |
paytokens-beta/paytokensd | lib/rpsresolve.py | 1 | 6548 | #! /usr/bin/python3
import binascii
import struct
from . import (util, config, exceptions, jetcoin, util, rps)
# move random rps_match_id
FORMAT = '>H16s32s32s'
LENGTH = 2 + 16 + 32 + 32
ID = 81
def validate (db, source, move, random, rps_match_id):
    """Validate an RPS move reveal against its recorded match.

    Returns (txn, rps_match, problems): txn is 0 or 1 identifying which side
    of the match `source` is (None if undetermined), rps_match is the matching
    DB row (or None), and problems is a list of validation error strings
    (empty when valid).
    """
    problems = []
    rps_match = None
    # The revealed move must be an integer in [1, possible_moves].
    if not isinstance(move, int):
        problems.append('move must be a integer')
        return None, None, problems
    # `random` is the 16-byte salt (hex-encoded) used in the original commitment.
    try:
        random_bytes = binascii.unhexlify(random)
    except:
        problems.append('random must be an hexadecimal string')
        return None, None, problems
    if len(random_bytes) != 16:
        problems.append('random must be 16 bytes in hexadecimal format')
        return None, None, problems
    cursor = db.cursor()
    rps_matches = list(cursor.execute('''SELECT * FROM rps_matches WHERE id = ?''', (rps_match_id,)))
    cursor.close()
    if len(rps_matches) == 0:
        problems.append('no such rps match')
        return None, rps_match, problems
    elif len(rps_matches) > 1:
        # id is the table's primary key; duplicates indicate DB corruption.
        assert False
    rps_match = rps_matches[0]
    if move<1:
        problems.append('move must be greater than 0')
    elif move > rps_match['possible_moves']:
        problems.append('move must be lower than {}'.format(rps_match['possible_moves']))
    if source not in [rps_match['tx0_address'], rps_match['tx1_address']]:
        problems.append('invalid source address')
        return None, rps_match, problems
    # Determine which side of the match the source is, and which statuses
    # still allow that side to resolve.
    if rps_match['tx0_address'] == source:
        txn = 0
        rps_match_status = ['pending', 'pending and resolved']
    else:
        txn = 1
        rps_match_status = ['pending', 'resolved and pending']
    # Recompute the commitment: dhash(random || move as 2-byte big-endian)
    # must equal the move_random_hash submitted when the match was opened.
    move_random_hash = jetcoin.dhash(random_bytes + int(move).to_bytes(2, byteorder='big'))
    move_random_hash = binascii.hexlify(move_random_hash).decode('utf-8')
    if rps_match['tx{}_move_random_hash'.format(txn)] != move_random_hash:
        problems.append('invalid move or random value')
        return txn, rps_match, problems
    # Finally, the match must still be open for this side to resolve.
    if rps_match['status'] == 'expired':
        problems.append('rps match expired')
    elif rps_match['status'].startswith('concluded'):
        problems.append('rps match concluded')
    elif rps_match['status'].startswith('invalid'):
        problems.append('rps match invalid')
    elif rps_match['status'] not in rps_match_status:
        problems.append('rps already resolved')
    return txn, rps_match, problems
def compose (db, source, move, random, rps_match_id):
    """Build an unsigned rpsresolve transaction tuple (source, outputs, data).

    Validates the reveal first and raises exceptions.RpsError on any problem.
    rps_match_id is the 128-hex-character concatenation of the two opening
    transaction hashes.
    """
    tx0_hash, tx1_hash = rps_match_id[:64], rps_match_id[64:] # UTF-8 encoding means that the indices are doubled.
    txn, rps_match, problems = validate(db, source, move, random, rps_match_id)
    if problems: raise exceptions.RpsError(problems)
    # Warn if down to the wire.
    time_left = rps_match['match_expire_index'] - util.last_block(db)['block_index']
    if time_left < 4:
        print('WARNING: Only {} blocks until that rps match expires. The conclusion might not make into the blockchain in time.'.format(time_left))
    tx0_hash_bytes = binascii.unhexlify(bytes(tx0_hash, 'utf-8'))
    tx1_hash_bytes = binascii.unhexlify(bytes(tx1_hash, 'utf-8'))
    random_bytes = binascii.unhexlify(bytes(random, 'utf-8'))
    # Payload: protocol prefix + message-type id + packed (move, random, hashes).
    data = config.PREFIX + struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, move, random_bytes, tx0_hash_bytes, tx1_hash_bytes)
    return (source, [], data)
def parse (db, tx, message):
    """Parse an rpsresolve message carried by transaction *tx*.

    Unpacks the payload, validates it against its rps match, records the
    resolve in the ``rpsresolves`` table, and — when the counterparty has
    already resolved — concludes the match and stores the outcome.
    """
    cursor = db.cursor()
    # Unpack message.
    try:
        assert len(message) == LENGTH
        move, random, tx0_hash_bytes, tx1_hash_bytes = struct.unpack(FORMAT, message)
        tx0_hash, tx1_hash = binascii.hexlify(tx0_hash_bytes).decode('utf-8'), binascii.hexlify(tx1_hash_bytes).decode('utf-8')
        rps_match_id = tx0_hash + tx1_hash
        random = binascii.hexlify(random).decode('utf-8')
        status = 'valid'
    except (AssertionError, struct.error) as e:
        # Malformed payload: still record the attempt, with invalid status.
        move, random, tx0_hash, tx1_hash, rps_match_id = None, None, None, None, None
        status = 'invalid: could not unpack'
    if status == 'valid':
        txn, rps_match, problems = validate(db, tx['source'], move, random, rps_match_id)
        if problems:
            rps_match = None
            status = 'invalid: ' + '; '.join(problems)
    # Add parsed transaction to message-type–specific table.
    rpsresolves_bindings = {
        'tx_index': tx['tx_index'],
        'tx_hash': tx['tx_hash'],
        'block_index': tx['block_index'],
        'source': tx['source'],
        'move': move,
        'random': random,
        'rps_match_id': rps_match_id,
        'status': status
    }
    if status == 'valid':
        rps_match_status = 'concluded'
        # If the match was still fully pending, this is the first resolve:
        # which half is marked resolved depends on whether source is tx0/tx1.
        if rps_match['status'] == 'pending':
            rps_match_status = 'resolved and pending' if txn==0 else 'pending and resolved'
        if rps_match_status == 'concluded':
            # Counterparty already resolved: fetch its (unique) valid
            # resolve and decide the game.
            counter_txn = 0 if txn == 1 else 1
            counter_source = rps_match['tx{}_address'.format(counter_txn)]
            sql = '''SELECT * FROM rpsresolves WHERE rps_match_id = ? AND source = ? AND status = ?'''
            counter_games = list(cursor.execute(sql, (rps_match_id, counter_source, 'valid')))
            assert len(counter_games) == 1
            counter_game = counter_games[0]
            winner = resolve_game(db, rpsresolves_bindings, counter_game)
            if winner == 0:
                rps_match_status = 'concluded: tie'
            elif winner == counter_game['tx_index']:
                rps_match_status = 'concluded: {} player wins'.format('first' if counter_txn == 0 else 'second')
            else:
                rps_match_status = 'concluded: {} player wins'.format('first' if txn == 0 else 'second')
        rps.update_rps_match_status(db, rps_match, rps_match_status, tx['block_index'])
    sql = '''INSERT INTO rpsresolves VALUES (:tx_index, :tx_hash, :block_index, :source, :move, :random, :rps_match_id, :status)'''
    cursor.execute(sql, rpsresolves_bindings)
    cursor.close()
# https://en.wikipedia.org/wiki/Rock-paper-scissors#Additional_weapons:
def resolve_game(db, resovlerps1, resovlerps2):
    """Decide the winner between two opposing rpsresolve records.

    Under the extended rock-paper-scissors ruleset used here: when both
    moves share parity the lower move wins; when parities differ the
    higher move wins. Returns the winner's 'tx_index', or 0 for a tie.
    """
    first_move = resovlerps1['move']
    second_move = resovlerps2['move']
    if first_move == second_move:
        return 0
    shared_parity = first_move % 2 == second_move % 2
    first_wins = (first_move < second_move) if shared_parity else (first_move > second_move)
    return resovlerps1['tx_index'] if first_wins else resovlerps2['tx_index']
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit |
mancoast/CPythonPyc_test | fail/321_threaded_import_hangers.py | 57 | 1428 | # This is a helper module for test_threaded_import. The test imports this
# module, and this module tries to run various Python library functions in
# their own thread, as a side effect of being imported. If the spawned
# thread doesn't complete in TIMEOUT seconds, an "appeared to hang" message
# is appended to the module-global `errors` list. That list remains empty
# if (and only if) all functions tested complete.
TIMEOUT = 10
import threading
import tempfile
import os.path
errors = []
# This class merely runs a function in its own thread T. The thread importing
# this module holds the import lock, so if the function called by T tries
# to do its own imports it will block waiting for this module's import
# to complete.
class Worker(threading.Thread):
    """Thread that calls ``function(*args)`` exactly once when run."""

    def __init__(self, function, args):
        threading.Thread.__init__(self)
        self.function = function
        self.args = args

    def run(self):
        # Invoke the stored callable with its stored positional arguments.
        target, call_args = self.function, self.args
        target(*call_args)
# Run each (name, function, args) triple in its own Worker thread at import
# time; a thread still alive after TIMEOUT seconds is reported as a hang.
for name, func, args in [
    # Bug 147376: TemporaryFile hung on Windows, starting in Python 2.4.
    ("tempfile.TemporaryFile", lambda: tempfile.TemporaryFile().close(), ()),
    # The real cause for bug 147376: ntpath.abspath() caused the hang.
    ("os.path.abspath", os.path.abspath, ('.',)),
    ]:
    t = Worker(func, args)
    t.start()
    t.join(TIMEOUT)
    if t.is_alive():
        errors.append("%s appeared to hang" % name)
| gpl-3.0 |
harshita-gupta/Harvard-FRSEM-Catalog-2016-17 | flask/lib/python2.7/site-packages/openid/store/nonce.py | 180 | 2843 | __all__ = [
'split',
'mkNonce',
'checkTimestamp',
]
from openid import cryptutil
from time import strptime, strftime, gmtime, time
from calendar import timegm
import string
NONCE_CHARS = string.ascii_letters + string.digits

# Nonces are kept for five hours: generous room for request time plus
# clock skew, at negligible storage cost.
SKEW = 60 * 60 * 5

time_fmt = '%Y-%m-%dT%H:%M:%SZ'
time_str_len = len('0000-00-00T00:00:00Z')

def split(nonce_string):
    """Split a nonce into (Unix timestamp, salt characters).

    @param nonce_string: the nonce from which to extract the timestamp
    @type nonce_string: str
    @returns: A pair of a Unix timestamp and the salt characters
    @returntype: (int, str)
    @raises ValueError: if the nonce does not start with a correctly
        formatted time string
    """
    prefix, salt = nonce_string[:time_str_len], nonce_string[time_str_len:]
    try:
        stamp = timegm(strptime(prefix, time_fmt))
    except AssertionError:  # Python 2.2 raised this for bad input
        stamp = -1
    if stamp < 0:
        raise ValueError('time out of range')
    return stamp, salt
def checkTimestamp(nonce_string, allowed_skew=SKEW, now=None):
    """Is the timestamp that is part of the specified nonce string
    within the allowed clock-skew of the current time?

    @param nonce_string: The nonce that is being checked
    @type nonce_string: str
    @param allowed_skew: How many seconds should be allowed for
        completing the request, allowing for clock skew.
    @type allowed_skew: int
    @param now: The current time, as a Unix timestamp
    @type now: int
    @returntype: bool
    @returns: Whether the timestamp is correctly formatted and within
        the allowed skew of the current time.
    """
    try:
        stamp, _ = split(nonce_string)
    except ValueError:
        # No parseable timestamp at all: the nonce cannot be validated.
        return False
    reference = time() if now is None else now
    # Accept stamps no further than allowed_skew seconds from the
    # reference time, in either direction.
    return abs(stamp - reference) <= allowed_skew
def mkNonce(when=None):
    """Generate a nonce: a formatted UTC timestamp plus six random chars.

    @param when: Unix timestamp representing the issue time of the
        nonce. Defaults to the current time.
    @type when: int
    @returntype: str
    @returns: A string that should be usable as a one-way nonce
    @see: time
    """
    moment = gmtime() if when is None else gmtime(when)
    stamp = strftime(time_fmt, moment)
    # Six characters of salt keep simultaneously issued nonces distinct.
    return stamp + cryptutil.randomString(6, NONCE_CHARS)
| mit |
todaychi/hue | desktop/core/ext-py/pycparser-2.14/examples/c-to-c.py | 19 | 1578 | #------------------------------------------------------------------------------
# pycparser: c-to-c.py
#
# Example of using pycparser.c_generator, serving as a simplistic translator
# from C to AST and back to C.
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_parser, c_generator
def translate_to_c(filename):
    """Parse *filename* as C (run through the preprocessor) and print the
    source regenerated from the resulting AST.
    """
    tree = parse_file(filename, use_cpp=True)
    print(c_generator.CGenerator().visit(tree))
def _zz_test_translate():
    # internal use: parse an inline C snippet, dump its AST, then emit it
    # back as C through CGenerator.
    src = r'''
    void f(char * restrict joe){}
    int main(void)
    {
        unsigned int long k = 4;
        int p = - - k;
        return 0;
    }
    '''
    parser = c_parser.CParser()
    ast = parser.parse(src)
    ast.show()
    generator = c_generator.CGenerator()
    print(generator.visit(ast))
    # tracing the generator for debugging
    #~ import trace
    #~ tr = trace.Trace(countcallers=1)
    #~ tr.runfunc(generator.visit, ast)
    #~ tr.results().write_results()
#------------------------------------------------------------------------------
if __name__ == "__main__":
    #_zz_test_translate()
    # Translate the C file named on the command line back to C via the AST.
    if len(sys.argv) > 1:
        translate_to_c(sys.argv[1])
    else:
        print("Please provide a filename as argument")
| apache-2.0 |
zstackorg/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/header/host.py | 4 | 1304 | import zstackwoodpecker.header.header as zstack_header
CONNECTED = 'Connected'
CONNECTING = 'Connecting'
DISCONNECTED = 'Disconnected'
ENABLED = 'Enabled'
DISABLED = 'Disabled'
PREMAINTENANCE = 'PreMaintenance'
MAINTENANCE = 'Maintenance'
class TestHost(zstack_header.ZstackObject):
    """Test-side model of a host, tracking its admin state
    (Enabled/Disabled/Maintenance) and its connection state."""

    def __init__(self):
        self.host = None
        self.state = None
        self.connection_state = None

    def __repr__(self):
        if not self.host:
            return '%s-None' % self.__class__.__name__
        return '%s-%s' % (self.__class__.__name__, self.host.uuid)

    def add(self):
        """A freshly added host comes up enabled and connected."""
        self.state = ENABLED
        self.connection_state = CONNECTED

    def delete(self):
        """Deletion clears both state fields."""
        self.state = None
        self.connection_state = None

    def maintain(self):
        """Maintenance mode implies a disconnected host."""
        self.state = MAINTENANCE
        self.connection_state = DISCONNECTED

    def reconnect(self):
        self.state = ENABLED
        self.connection_state = CONNECTED

    def disable(self):
        self.state = DISABLED
        self.connection_state = DISCONNECTED

    def enable(self):
        self.state = ENABLED

    def check(self):
        # No generic checking is performed for hosts.
        pass

    def get_host(self):
        return self.host

    def get_state(self):
        return self.state

    def get_connection_state(self):
        return self.connection_state
| apache-2.0 |
peterlauri/django | django/contrib/admin/utils.py | 14 | 19023 | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.utils import pretty_name
from django.urls import NoReverseMatch, reverse
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import (
override as translation_override, ungettext,
)
class FieldIsAForeignKeyColumnName(Exception):
    """A field is a foreign key attname, i.e. <FK>_id.

    Raised by _get_non_gfk_field() so callers treat ``<FK>_id`` column
    names as plain attributes rather than coercing them to the FK field.
    """
    pass
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    parts = lookup_path.split(LOOKUP_SEP)
    # A trailing query term (e.g. 'icontains') is not a field name.
    if parts[-1] in QUERY_TERMS:
        parts = parts[:-1]
    # Walk the relation chain; any many-to-many hop can duplicate rows,
    # which is exactly when distinct() is required.
    for part in parts:
        field = opts.get_field(part)
        if not hasattr(field, 'get_path_info'):
            continue
        path_info = field.get_path_info()
        if any(step.m2m for step in path_info):
            return True
        # Follow the relation so the next name resolves on the right model.
        opts = path_info[-1].to_opts
    return False
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # '__in' lookups take a list: split the comma-separated string.
    if key.endswith('__in'):
        value = value.split(',')
    # '__isnull' lookups take a boolean: '', 'false' and '0' mean False.
    if key.endswith('__isnull'):
        value = value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    # Non-string primary keys (ints, UUIDs, ...) pass through untouched.
    if not isinstance(s, six.string_types):
        return s
    return ''.join(
        '_%02X' % ord(ch) if ch in """:/_#?;@&=+$,"[]<>%\n\\""" else ch
        for ch in s
    )
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().

    Each '_XX' sequence (XX being two hex digits) decodes back to the
    character with that ordinal; an underscore not followed by two hex
    digits is preserved literally.
    """
    tokens = s.split('_')
    # The first token precedes any escape and is kept verbatim.
    res = [tokens[0]]
    for item in tokens[1:]:
        # item[1:2] is truthy only when at least two chars follow the '_'.
        if item[1:2]:
            try:
                res.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                # Not a valid hex escape: restore the literal underscore.
                res.append('_' + item)
        else:
            res.append('_' + item)
    return ''.join(res)
def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    flat = []
    for field in fields:
        # Nested lists/tuples are spliced in; scalars are appended.
        flat.extend(field if isinstance(field, (list, tuple)) else [field])
    return flat


def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    names = []
    for _name, opts in fieldsets:
        names.extend(flatten(opts['fields']))
    return names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).

    Returns a 4-tuple (to_delete, model_count, perms_needed, protected):
    a nested list of display strings suitable for the ``unordered_list``
    template filter, a mapping of plural verbose names to object counts,
    the set of verbose names the user lacks delete permission for, and
    the formatted objects protected from deletion.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    perms_needed = set()
    def format_callback(obj):
        # Objects registered with this admin site get a change-page link.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            # Side effect: record missing delete permissions for the caller.
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    protected = [format_callback(obj) for obj in collector.protected]
    model_count = {model._meta.verbose_name_plural: len(objs) for model, objs in collector.model_objs.items()}
    return to_delete, model_count, perms_needed, protected
class NestedObjects(Collector):
    """Deletion collector that also records the cascade graph so it can be
    rendered as a nested list (used by the admin delete-confirmation page).
    """
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        self.protected = set()
        self.model_objs = defaultdict(set)
    def add_edge(self, source, target):
        # Record that deleting `source` cascades to `target`; a None source
        # marks `target` as a root of the graph.
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_objs[obj._meta.model].add(obj)
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Protected objects block deletion; remember them for display
            # instead of propagating the error.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Prefetch the relation so format callbacks avoid extra queries.
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first walk of the edge graph; `seen` guards against
        # revisiting (and thus duplicating) shared children.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
    typically for use with string formatting.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    """
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        opts = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        opts = obj.model._meta
    else:
        # Anything else is assumed to already be an options-like object
        # exposing verbose_name / verbose_name_plural.
        opts = obj
    return {
        'verbose_name': force_text(opts.verbose_name),
        'verbose_name_plural': force_text(opts.verbose_name_plural)
    }
def model_ngettext(obj, n=None):
    """
    Return the appropriate `verbose_name` or `verbose_name_plural` value for
    `obj` depending on the count `n`.
    `obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
    If `obj` is a `QuerySet` instance, `n` is optional and the length of the
    `QuerySet` is used.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            # Default the count to the size of the queryset itself.
            n = obj.count()
        obj = obj.model
    d = model_format_dict(obj)
    singular, plural = d["verbose_name"], d["verbose_name_plural"]
    return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
    """Resolve `name` against `obj`, optionally consulting `model_admin`.

    Returns a (field, attr, value) triple: `field` is the concrete model
    field (or None), `attr` is the callable/attribute used instead of a
    field (or None), and `value` is the resolved value.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except (FieldDoesNotExist, FieldIsAForeignKeyColumnName):
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            # A display callable defined on the ModelAdmin takes the
            # instance as its argument.
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
    """
    For historical reasons, the admin app relies on GenericForeignKeys as being
    "not found" by get_field(). This could likely be cleaned up.
    Reverse relations should also be excluded as these aren't attributes of the
    model (rather something like `foo_set`).

    Raises FieldDoesNotExist for generic foreign keys and reverse
    relations, and FieldIsAForeignKeyColumnName for `<FK>_id` attnames.
    """
    field = opts.get_field(name)
    if (field.is_relation and
            # Generic foreign keys OR reverse relations
            ((field.many_to_one and not field.related_model) or field.one_to_many)):
        raise FieldDoesNotExist()
    # Avoid coercing <FK>_id fields to FK
    if field.is_relation and not field.many_to_many and hasattr(field, 'attname') and field.attname == name:
        raise FieldIsAForeignKeyColumnName()
    return field
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.related_model._meta.verbose_name
    except FieldDoesNotExist:
        # Not a model field: fall back through a cascade of lookups.
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            # Resolve the attribute: callable, ModelAdmin member, or
            # model member — in that order.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # Pick the label: an explicit short_description wins, then a
            # property's fget short_description, then the callable's name.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    except FieldIsAForeignKeyColumnName:
        # `<FK>_id` column names are labelled by name, not by the FK field.
        label = pretty_name(name)
        attr = name
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    """Return the help_text of the named field of *model*, or '' when the
    name does not resolve to a concrete model field."""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except (FieldDoesNotExist, FieldIsAForeignKeyColumnName):
        help_text = ""
    else:
        help_text = getattr(field, 'help_text', "")
    return smart_text(help_text)
def display_for_field(value, field, empty_value_display):
    """Format `value` for admin display according to its model `field` type."""
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if getattr(field, 'flatchoices', None):
        # Choice fields display the human-readable choice label.
        return dict(field.flatchoices).get(value, empty_value_display)
    # NullBooleanField needs special-case null-handling, so it comes
    # before the general null test.
    elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    elif isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    elif isinstance(field, (models.IntegerField, models.FloatField)):
        return formats.number_format(value)
    elif isinstance(field, models.FileField) and value:
        return format_html('<a href="{}">{}</a>', value.url, value)
    else:
        # Fall back to value-based formatting for everything else.
        return display_for_value(value, empty_value_display)
def display_for_value(value, empty_value_display, boolean=False):
    """Format a raw `value` for admin display when no model field is known.

    When `boolean` is True the value is rendered as a boolean icon.
    """
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    if boolean:
        return _boolean_icon(value)
    elif value is None:
        return empty_value_display
    elif isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    elif isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    elif isinstance(value, (list, tuple)):
        return ', '.join(force_text(v) for v in value)
    else:
        return force_text(value)
class NotRelationField(Exception):
    """Raised when a field does not represent a relation."""
    pass


def get_model_from_relation(field):
    """Return the model at the far end of a relational *field*.

    Raises NotRelationField for fields without ``get_path_info``.
    """
    if not hasattr(field, 'get_path_info'):
        raise NotRelationField
    return field.get_path_info()[-1].to_opts.model
def reverse_field_path(model, path):
    """ Create a reversed field path.
    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").
    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1:  # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break
        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Forward relation: reverse it via the related query name.
            related_name = field.related_query_name()
            parent = field.remote_field.model
        else:
            # Reverse relation: reverse it via the originating field's name.
            related_name = field.field.name
            parent = field.related_model
        # Prepend so the assembled path reads back-to-front.
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """ Return list of Fields given path relative to model.
    e.g. (ModelX, "user__groups__name") -> [
        <django.db.models.fields.related.ForeignKey object at 0x...>,
        <django.db.models.fields.related.ManyToManyField object at 0x...>,
        <django.db.models.fields.CharField object at 0x...>,
    ]
    """
    fields = []
    for piece in path.split(LOOKUP_SEP):
        # The first hop starts at *model*; later hops follow the model
        # reached through the previous relational field.
        parent = get_model_from_relation(fields[-1]) if fields else model
        fields.append(parent._meta.get_field(piece))
    return fields
def construct_change_message(form, formsets, add):
    """
    Construct a JSON structure describing changes from a changed object.
    Translations are deactivated so that strings are stored untranslated.
    Translation happens later on LogEntry access.
    """
    change_message = []
    if add:
        change_message.append({'added': {}})
    elif form.changed_data:
        change_message.append({'changed': {'fields': form.changed_data}})
    if formsets:
        # Deactivate translation while rendering inline object names.
        with translation_override(None):
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append({
                        'added': {
                            'name': force_text(added_object._meta.verbose_name),
                            'object': force_text(added_object),
                        }
                    })
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append({
                        'changed': {
                            'name': force_text(changed_object._meta.verbose_name),
                            'object': force_text(changed_object),
                            'fields': changed_fields,
                        }
                    })
                for deleted_object in formset.deleted_objects:
                    change_message.append({
                        'deleted': {
                            'name': force_text(deleted_object._meta.verbose_name),
                            'object': force_text(deleted_object),
                        }
                    })
    return change_message
| bsd-3-clause |
konstantinoskostis/sqlalchemy-utils | sqlalchemy_utils/primitives/currency.py | 5 | 2588 | # -*- coding: utf-8 -*-
import six
from .. import i18n, ImproperlyConfigured
from ..utils import str_coercible
@str_coercible
class Currency(object):
    """
    Currency class wraps a 3-letter currency code. It provides various
    convenience properties and methods.
    ::
        from babel import Locale
        from sqlalchemy_utils import Currency, i18n
        # First lets add a locale getter for testing purposes
        i18n.get_locale = lambda: Locale('en')
        Currency('USD').name  # US Dollar
        Currency('USD').symbol  # $
        Currency(Currency('USD')).code  # 'USD'
    Currency always validates the given code.
    ::
        Currency(None)  # raises TypeError
        Currency('UnknownCode')  # raises ValueError
    Currency supports equality operators.
    ::
        Currency('USD') == Currency('USD')
        Currency('USD') != Currency('EUR')
    Currencies are hashable.
    ::
        len(set([Currency('USD'), Currency('USD')]))  # 1
    """
    def __init__(self, code):
        # babel provides both validation and localization; fail fast
        # with a clear message when it is missing.
        if i18n.babel is None:
            raise ImproperlyConfigured(
                "'babel' package is required in order to use Currency class."
            )
        if isinstance(code, Currency):
            # Copy-construction: take the already-validated string code
            # from the other instance. (Storing the Currency object itself
            # made `Currency(Currency('USD')).code` a Currency instead of
            # the documented 'USD'.)
            self.code = code.code
        elif isinstance(code, six.string_types):
            self.validate(code)
            self.code = code
        else:
            raise TypeError(
                'First argument given to Currency constructor should be '
                'either an instance of Currency or valid three letter '
                'currency code.'
            )
    @classmethod
    def validate(self, code):
        """Raise ValueError if *code* is not a known currency code."""
        try:
            i18n.babel.Locale('en').currencies[code]
        except KeyError:
            raise ValueError("'{0}' is not valid currency code.".format(code))
    @property
    def symbol(self):
        """The currency symbol (e.g. '$') for the active locale."""
        return i18n.babel.numbers.get_currency_symbol(
            self.code,
            i18n.get_locale()
        )
    @property
    def name(self):
        """The localized currency name (e.g. 'US Dollar')."""
        return i18n.get_locale().currencies[self.code]
    def __eq__(self, other):
        # Currencies compare equal to other Currency instances with the
        # same code and to the bare code string itself.
        if isinstance(other, Currency):
            return self.code == other.code
        elif isinstance(other, six.string_types):
            return self.code == other
        else:
            return NotImplemented
    def __ne__(self, other):
        return not (self == other)
    def __hash__(self):
        # Hash by code so equal currencies collapse in sets/dicts.
        return hash(self.code)
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.code)
    def __unicode__(self):
        return self.code
| bsd-3-clause |
DamourYouKnow/HAHA-NO-UR | main.py | 1 | 2130 | '''
A discord bot for scouting in Love Live: School Idol Festival.
'''
import sys
from asyncio import get_event_loop
from json import load
from time import time
from threading import Thread
from commands import *
from bot import HahaNoUR, get_session_manager
from bot.logger import setup_logging
from config import config_path
from data_controller.mongo import MongoClient
from logs import log_path
from data_controller.card_updater import update_task
from discord import channel
class Overwrites:
    """Slot-based permission-overwrite record; 'id' and 'type' are
    required, 'allow'/'deny' default to 0."""
    __slots__ = ('id', 'allow', 'deny', 'type')

    def __init__(self, **kwargs):
        self.id = kwargs.pop('id')
        self.type = kwargs.pop('type')
        self.allow = kwargs.pop('allow', 0)
        self.deny = kwargs.pop('deny', 0)

    def _asdict(self):
        # Plain-dict view mirroring the slot attributes.
        return dict(id=self.id, allow=self.allow, deny=self.deny, type=self.type)
channel.Overwrites = Overwrites
def main():
    """Bot entry point: parse optional shard arguments, load config/auth,
    construct the HahaNoUR bot with its cogs, and run it."""
    shard = None
    shard_count = None
    # Optional CLI args: <shard index> <shard count>.
    if len(sys.argv) > 2:
        shard = int(sys.argv[1])
        shard_count = int(sys.argv[2])
        print(shard)
    start_time = int(time())
    logger = setup_logging(start_time, log_path)
    loop = get_event_loop()
    session_manager = loop.run_until_complete(get_session_manager(logger))
    with config_path.joinpath('config.json').open() as f:
        config = load(f)
    with config_path.joinpath('auth.json').open() as f:
        auth = load(f)
    # Mongo can be disabled via the 'mongo' config flag (defaults to on).
    db = MongoClient() if config.get('mongo', True) else None
    bot = HahaNoUR(
        config['default_prefix'], start_time, int(config['colour'], base=16),
        logger, session_manager, db, auth['error_log'], auth['feedback_log'],
        shard, shard_count
    )
    bot.remove_command('help')
    cogs = [
        Scout(bot),
        Album(bot),
        Info(bot),
        Stats(bot),
        Config(bot)
    ]
    # Only shard 0 runs the background card-update daemon thread.
    if shard == 0:
        card_update_thread = Thread(target=update_task)
        card_update_thread.setDaemon(True)
        card_update_thread.start()
    bot.start_bot(cogs, auth['token'])
if __name__ == '__main__':
    main()
| mit |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/tools/site_compare/scrapers/ie/ie7.py | 189 | 5669 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for all known versions of IE."""
import pywintypes
import time
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# Default version
version = "7.0.5730.1"
DEFAULT_PATH = r"c:\program files\internet explorer\iexplore.exe"
def GetBrowser(path):
  """Invoke the IE browser and return the process, frame, and content window.

  Args:
    path: full path to browser; an empty value falls back to DEFAULT_PATH

  Returns:
    A tuple of (process handle, main window, render pane)
  """
  browser_path = path or DEFAULT_PATH
  wnd, proc, _address_bar, pane, _tab = InvokeBrowser(browser_path)
  return (proc, wnd, pane)
def InvokeBrowser(path):
  """Invoke the IE browser.

  NOTE: Python 2 code (xrange) driving win32 UI automation.

  Args:
    path: full path to browser

  Returns:
    A tuple of (main window, process handle, address bar,
    render_pane, tab_window)
  """
  # Invoke IE
  (ieproc, iewnd) = windowing.InvokeAndWait(path)
  # Get windows we'll need
  for tries in xrange(10):
    try:
      address_bar = windowing.FindChildWindow(
        iewnd, "WorkerW|Navigation Bar/ReBarWindow32/"
        "Address Band Root/ComboBoxEx32/ComboBox/Edit")
      render_pane = windowing.FindChildWindow(
        iewnd, "TabWindowClass/Shell DocObject View")
      tab_window = windowing.FindChildWindow(
        iewnd, "CommandBarClass/ReBarWindow32/TabBandClass/DirectUIHWND")
    except IndexError:
      # The browser chrome may not exist yet; retry for up to ~10 seconds.
      time.sleep(1)
      continue
    break
  return (iewnd, ieproc, address_bar, render_pane, tab_window)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
  """Invoke a browser, send it to a series of URLs, and save its output.

  Args:
    urls: list of URLs to scrape
    outdir: directory to place output
    size: size of browser window to use
    pos: position of browser window
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args ('path' overrides the browser
      binary; 'filename' names the output file and may be a callable
      taking the URL)

  Returns:
    None if success, else an error string
  """
  path = r"c:\program files\internet explorer\iexplore.exe"
  if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
  (iewnd, ieproc, address_bar, render_pane, tab_window) = (
    InvokeBrowser(path) )
  # Resize and reposition the frame
  windowing.MoveAndSizeWindow(iewnd, pos, size, render_pane)
  # Visit each URL we're given
  if type(urls) in types.StringTypes: urls = [urls]
  timedout = False
  for url in urls:
    # Double-click in the address bar, type the name, and press Enter
    mouse.DoubleClickInWindow(address_bar)
    keyboard.TypeString(url)
    keyboard.TypeString("\n")
    # Wait for the page to finish loading
    load_time = windowing.WaitForThrobber(
      tab_window, (6, 8, 22, 24), timeout)
    timedout = load_time < 0
    if timedout:
      break
    # Scrape the page
    image = windowing.ScrapeWindow(render_pane)
    # Save to disk
    if "filename" in kwargs:
      if callable(kwargs["filename"]):
        filename = kwargs["filename"](url)
      else:
        filename = kwargs["filename"]
    else:
      filename = windowing.URLtoFilename(url, outdir, ".bmp")
    image.save(filename)
  windowing.EndProcess(ieproc)
  if timedout:
    return "timeout"
def Time(urls, size, timeout, **kwargs):
  """Measure how long it takes to load each of a series of URLs

  Args:
    urls: list of URLs to time
    size: size of browser window to use
    timeout: amount of time to wait for page to load
    kwargs: miscellaneous keyword args

  Returns:
    A list of tuples (url, time). "time" can be "crashed" or "timeout"
  """
  # Callers may point us at a non-default browser binary via kwargs["path"].
  browser_path = kwargs.get("path") or DEFAULT_PATH

  proc = None

  # A single URL may be passed as a bare string; normalize to a list.
  if type(urls) in types.StringTypes:
    urls = [urls]

  results = []
  for url in urls:
    try:
      # (Re)launch the browser if it isn't currently running.
      if not proc:
        (wnd, proc, address_bar, render_pane, tab_window) = \
            InvokeBrowser(browser_path)

        # Resize and reposition the browser frame.
        windowing.MoveAndSizeWindow(wnd, (0, 0), size, render_pane)

      # Select the address bar contents, type the URL, and press Enter.
      mouse.DoubleClickInWindow(address_bar)
      keyboard.TypeString(url)
      keyboard.TypeString("\n")

      # Wait for the throbber to stop, i.e. for the page to finish loading.
      load_time = windowing.WaitForThrobber(
          tab_window, (6, 8, 22, 24), timeout)
      if load_time < 0:
        load_time = "timeout"

        # Send an alt-F4 to make the browser close; if this times out,
        # we've probably got a crash.
        keyboard.TypeString(r"{\4}", use_modifiers=True)
        if not windowing.WaitForProcessExit(proc, timeout):
          windowing.EndProcess(proc)
          load_time = "crashed"
        proc = None
    except pywintypes.error:
      # Any win32 API failure is treated as a browser crash.
      load_time = "crashed"
      proc = None

    results.append((url, load_time))

  # Close any browser still running after the last URL; a hung exit
  # is treated as a crash and the process is killed.
  if proc:
    keyboard.TypeString(r"{\4}", use_modifiers=True)
    if not windowing.WaitForProcessExit(proc, timeout):
      windowing.EndProcess(proc)

  return results
def main():
  """Smoke test: scrape three sites into a fixed output directory."""
  out_path = r"c:\sitecompare\scrapes\ie7\7.0.5380.11"
  windowing.PreparePath(out_path)

  # Scrape three sites and save the results.
  Scrape(
    ["http://www.microsoft.com",
     "http://www.google.com",
     "http://www.sun.com"],
    out_path, (1024, 768), (0, 0))
  return 0


if __name__ == "__main__":
  sys.exit(main())
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tests/test_categorical.py | 9 | 162878 | # -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
from datetime import datetime
from pandas.compat import range, lrange, u, PY3
import os
import pickle
import re
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp, CategoricalIndex
from pandas.core.config import option_context
import pandas.core.common as com
import pandas.compat as compat
import pandas.util.testing as tm
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# Shared fixture: an ordered Categorical over 'a' < 'b' < 'c'.
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
def test_getitem(self):
# Scalar, fancy (list) and boolean-mask indexing on a Categorical.
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# Assignment through positional and boolean-mask indexing.
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c),dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# Mixed int/datetime values cannot be ordered; behavior varies by
# Python/numpy version (sorting mixed types raises on PY3 and numpy >= 1.10).
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# is_dtype_equal must account for category values, their order, and orderedness.
# test dtype comparisons between cats
c1 = Categorical(list('aabca'),categories=list('abc'),ordered=False)
c2 = Categorical(list('aabca'),categories=list('cab'),ordered=False)
c3 = Categorical(list('aabca'),categories=list('cab'),ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,ordered=True)))
def test_constructor(self):
# Broad constructor coverage: arrays, Categorical/Series inputs, dtype
# inference, NaN-in-categories deprecation, scalars, and old-style
# (codes, categories) usage that must warn.
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a","b","c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c","b","a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1,2], [1,2,2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a","b"], ["a","b","b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1,2], [1,2,np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1, categories=["a","b","c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a","b","c","d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan,1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3 ])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3. ])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember the original type"
# feature to try to cast the array interface result to...
#vals = np.asarray(cat[cat.notnull()])
#self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor useage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=["a","b","c"])
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=[3,4,5])
# the next one are from the old docs, but unfortunately these don't trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
cat = Categorical([1,2], categories=[1,2,3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([],dtype='int64'),categories=[3,2,1],ordered=True)
def test_constructor_with_index(self):
# Round-tripping a CategoricalIndex through the Categorical constructor.
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci.astype(object),categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull returned a scalar
# for a generator
from pandas.compat import range as xrange
exp = Categorical([0,1,2])
cat = Categorical((x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0,1,2], categories=(x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0,1,2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# from_codes validates its inputs: enough categories, integer codes,
# unique categories, codes >= -1.
# too few categories
def f():
Categorical.from_codes([1,2], [1,2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1,2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0,1,2], ["a","a","b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2,1,2], ["a","b","c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a","b","c"], ordered=False)
res = Categorical.from_codes([0,1,2], ["a","b","c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0,1], 5, p=[0.9,0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
# Comparison semantics: elementwise ops respect category order; comparing
# against Categoricals with different categories/ordering, Series, or
# ndarrays must raise TypeError.
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a","b","c"], categories=["c","b","a"], ordered=True)
cat_rev_base = pd.Categorical(["b","b","b"], categories=["c","b","a"], ordered=True)
cat = pd.Categorical(["a","b","c"], ordered=True)
cat_base = pd.Categorical(["b","b","b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(["b","b","b"], categories=["c","b","a","d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on newer
# numpy versions
a = np.array(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in account
cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
# A code of -1 in the labels marks a missing value (isnull must agree).
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
# Omitting categories= infers them from the values (matches the fixture).
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# describe() returns counts and frequencies per category, including
# unused categories, NaN rows, and (deprecated) NaN categories.
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3/8., 2/8., 3/8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'], name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a","b","c","d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3/8., 2/8., 3/8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5/11., 3/11., 3/11.]},
index=pd.CategoricalIndex([1, 2, 3], name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan,1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1/4., 2/4., 1/4.]},
index=pd.CategoricalIndex([1, 2, np.nan], categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan], categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]],
columns=['counts','freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories'))
tm.assert_frame_equal(result,expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"], categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1/3.], [2, 2/3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result,expected)
def test_print(self):
# repr of an ordered Categorical shows values plus ordered categories.
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
# Long Categoricals are truncated with an ellipsis and show a Length line.
factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]",
"Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
# repr of empty Categoricals (unordered, ordered, and no categories).
factor = Categorical([], ["a","b","c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a","b","c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
# display.width=None must not break categorical Series repr.
a = pd.Series(pd.Categorical([1,2,3,4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
# repr of unicode categories; on PY2 compare via unicode() instead of repr.
if PY3:
_rep = repr
else:
_rep = unicode
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]"""
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
# Categoricals built from PeriodIndex: codes map onto sorted unique periods.
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
# Assigning .categories renames in place; length mismatches must raise.
s = pd.Categorical(["a","b","c","a"])
exp = np.array([1,2,3,1])
s.categories = [1,2,3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1,2,3]))
# lengthen
def f():
s.categories = [1,2,3,4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1,2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
# ordered defaults to False and is honored when passed explicitly.
cat = Categorical([0,1,2])
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
# Interaction of explicit categories= with the ordered flag.
cat1 = pd.Categorical(["a","c","b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a','b','c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b','c','a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a","c","b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a','b','c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b','c','a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
# as_ordered/as_unordered/set_ordered, both copying and inplace;
# direct assignment to .ordered is deprecated.
cat = Categorical(["a","b","c","a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deperecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
# set_categories: inplace vs copy, dropping values to NaN when not in the
# new categories, code remapping on reorder, and ordered pass-through.
cat = Categorical(["a","b","c","a"], ordered=True)
exp_categories = np.array(["c","b","a"])
exp_values = np.array(["a","b","c","a"])
res = cat.set_categories(["c","b","a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a","b","c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a","b","c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now np.nan
cat = Categorical(["a","b","c","a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0,-1,-1,0]))
# still not all "old" in "new"
res = cat.set_categories(["a","b","d"])
self.assert_numpy_array_equal(res.codes, np.array([0,1,-1,0]))
self.assert_numpy_array_equal(res.categories, np.array(["a","b","d"]))
# all "old" included in "new"
cat = cat.set_categories(["a","b","c","d"])
exp_categories = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1,2,3,4,1], categories=[1,2,3,4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0]))
self.assert_numpy_array_equal(c.categories , np.array([1,2,3,4] ))
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1] ))
c = c.set_categories([4,3,2,1]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3,2,1,0,3])) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4,3,2,1])) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1])) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4,3,2,1],ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4,3,2,1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
# rename_categories: copy vs inplace; new names must match in length.
cat = pd.Categorical(["a","b","c","a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1,2,3])
self.assert_numpy_array_equal(res.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(res.categories, np.array([1,2,3]))
self.assert_numpy_array_equal(cat.__array__(), np.array(["a","b","c","a"]))
self.assert_numpy_array_equal(cat.categories, np.array(["a","b","c"]))
res = cat.rename_categories([1,2,3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(cat.categories, np.array([1,2,3]))
# lengthen
def f():
cat.rename_categories([1,2,3,4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1,2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
# reorder_categories requires the exact same category set, reordered.
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["c","b","a"], ordered=True)
# first inplace == False
res = cat.reorder_categories(["c","b","a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c","b","a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a","b","c","a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a","b","d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a","b","c","d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
# add_categories: copy vs inplace; duplicates raise; accepts
# Series/ndarray/Index/list of new categories (GH 9927).
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["a","b","c","d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
# remove_categories: removed values become NaN; removing a missing
# category raises.
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b",np.nan,"a"], categories=["a","b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
# remove_unused_categories drops categories with no occurrences, keeping
# NaN codes intact (GH 11599).
c = Categorical(["a","b","c","d","a"], categories=["a","b","c","d","e"])
exp_categories_all = np.array(["a","b","c","d","e"])
exp_categories_dropped = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a","b","c",np.nan], categories=["a","b","c","d","e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, np.array(["a","b","c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [ 2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
    def test_nan_handling(self):
        # Nans are represented as -1 in codes
        c = Categorical(["a","b",np.nan,"a"])
        self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
        self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
        # assigning NaN into a position resets its code to -1
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
        self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0]))
        # If categories have nan included, the code should point to that instead
        # (NaN-in-categories is deprecated, hence the FutureWarning)
        with tm.assert_produces_warning(FutureWarning):
            c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])
        self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
                                                             dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
                                                             dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0,2,2,0]))
        # Changing categories should also make the replaced category np.nan
        c = Categorical(["a","b","c","a"])
        with tm.assert_produces_warning(FutureWarning):
            c.categories = ["a","b",np.nan]
        self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
                                                             dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0]))
        # Adding nan to categories should make assigned nan point to the category!
        c = Categorical(["a","b",np.nan,"a"])
        self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
        self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
        with tm.assert_produces_warning(FutureWarning):
            c.set_categories(["a","b",np.nan], rename=True, inplace=True)
        # rename=True keeps existing codes, so the old -1 stays -1 ...
        self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan],
                                                             dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0,1,-1,0]))
        # ... but a NEW NaN assignment now maps to the NaN category (code 2)
        c[1] = np.nan
        self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],
                                                              dtype=np.object_))
        self.assert_numpy_array_equal(c._codes, np.array([0,2,-1,0]))
        # Remove null categories (GH 10156)
        cases = [
            ([1.0, 2.0, np.nan], [1.0, 2.0]),
            (['a', 'b', None], ['a', 'b']),
            ([pd.Timestamp('2012-05-01'), pd.NaT], [pd.Timestamp('2012-05-01')])
        ]
        null_values = [np.nan, None, pd.NaT]
        for with_null, without in cases:
            with tm.assert_produces_warning(FutureWarning):
                base = Categorical([], with_null)
            expected = Categorical([], without)
            for nullval in null_values:
                # any flavour of null removes the null category
                result = base.remove_categories(nullval)
                self.assert_categorical_equal(result, expected)
        # Different null values are indistinguishable, so a category list
        # containing more than one kind of null must raise
        for i, j in [(0, 1), (0, 2), (1, 2)]:
            nulls = [null_values[i], null_values[j]]
            def f():
                with tm.assert_produces_warning(FutureWarning):
                    Categorical([], categories=nulls)
            self.assertRaises(ValueError, f)
    def test_isnull(self):
        # isnull() flags entries whose code is -1 (missing values)
        exp = np.array([False, False, True])
        c = Categorical(["a","b",np.nan])
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
        # an entry coded to a (deprecated) NaN *category* is also null
        with tm.assert_produces_warning(FutureWarning):
            c = Categorical(["a","b",np.nan], categories=["a","b",np.nan])
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
        # test both nan in categories and as -1
        exp = np.array([True, False, True])
        c = Categorical(["a","b",np.nan])
        with tm.assert_produces_warning(FutureWarning):
            c.set_categories(["a","b",np.nan], rename=True, inplace=True)
            c[0] = np.nan
        res = c.isnull()
        self.assert_numpy_array_equal(res, exp)
    def test_codes_immutable(self):
        # Codes should be read only
        c = Categorical(["a","b","c","a", np.nan])
        exp = np.array([0,1,2,0,-1],dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
        # Assignments to codes should raise
        def f():
            c.codes = np.array([0,1,2,0,1],dtype='int8')
        self.assertRaises(ValueError, f)
        # changes in the codes array should raise
        # np 1.6.1 raises RuntimeError rather than ValueError
        codes= c.codes
        def f():
            codes[4] = 1
        self.assertRaises(ValueError, f)
        # But even after getting the codes, the original array should still be writeable!
        c[4] = "a"
        exp = np.array([0,1,2,0,0],dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
        # the private _codes attribute bypasses the read-only view
        c._codes[4] = 2
        exp = np.array([0,1,2,0, 2],dtype='int8')
        self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a","b","c","d"], ordered=False)
self.assertRaises(TypeError, lambda : cat.min())
self.assertRaises(TypeError, lambda : cat.max())
cat = Categorical(["a","b","c","d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
    def test_unique(self):
        # categories are reordered based on value when ordered=False
        cat = Categorical(["a", "b"])
        exp = np.asarray(["a", "b"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        # unused category "c" must be dropped from the result
        cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(exp))
        # result categories follow order of first appearance in the values
        cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
        exp = np.asarray(["c", "a", "b"])
        res = cat.unique()
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(exp, categories=['c', 'a', 'b']))
        # nan must be removed from the categories (it stays in the values)
        cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
        res = cat.unique()
        exp = np.asarray(["b", np.nan, "a"], dtype=object)
        self.assert_numpy_array_equal(res, exp)
        tm.assert_categorical_equal(res, Categorical(["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5,1], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan,np.nan,np.nan,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
    def test_sort(self):
        # unordered cats are sortable (both out-of-place and in-place forms
        # must simply not raise)
        cat = Categorical(["a","b","b","a"], ordered=False)
        cat.sort_values()
        cat.sort()
        cat = Categorical(["a","c","b","d"], ordered=True)
        # sort_values returns a new object sorted by category order
        res = cat.sort_values()
        exp = np.array(["a","b","c","d"],dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        cat = Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True)
        res = cat.sort_values()
        exp = np.array(["a","b","c","d"],dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        # descending
        res = cat.sort_values(ascending=False)
        exp = np.array(["d","c","b","a"],dtype=object)
        self.assert_numpy_array_equal(res.__array__(), exp)
        # sort (inplace order)
        cat1 = cat.copy()
        cat1.sort()
        exp = np.array(["a","b","c","d"],dtype=object)
        self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a","b","c","d","a","b","c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d","a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
    def test_set_item_nan(self):
        # assigning np.nan into a Categorical normally sets the code to -1
        cat = pd.Categorical([1,2,3])
        exp = pd.Categorical([1,np.nan,3], categories=[1,2,3])
        cat[1] = np.nan
        self.assertTrue(cat.equals(exp))
        # if nan in categories, the proper code should be set!
        # (NaN-in-categories is deprecated, hence the FutureWarnings below;
        # code 3 is the position of np.nan in the category list)
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
            cat[1] = np.nan
        exp = np.array([0,3,2,-1])
        self.assert_numpy_array_equal(cat.codes, exp)
        # slice assignment of a scalar nan
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
            cat[1:3] = np.nan
        exp = np.array([0,3,3,-1])
        self.assert_numpy_array_equal(cat.codes, exp)
        # slice assignment of a list mixing nan and a real value
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
            cat[1:3] = [np.nan, 1]
        exp = np.array([0,3,0,-1])
        self.assert_numpy_array_equal(cat.codes, exp)
        # slice assignment of an all-nan list
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
            cat[1:3] = [np.nan, np.nan]
        exp = np.array([0,3,3,-1])
        self.assert_numpy_array_equal(cat.codes, exp)
        # boolean-mask assignment selected via isnull
        cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3])
        with tm.assert_produces_warning(FutureWarning):
            cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
            cat[pd.isnull(cat)] = np.nan
        exp = np.array([0,1,3,2])
        self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1,2,3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1,2,3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo','foo','bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
    def test_searchsorted(self):
        # https://github.com/pydata/pandas/issues/8420
        # Categorical.searchsorted must agree with Series.searchsorted on
        # the underlying values.
        s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk' ])
        s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ])
        c1 = pd.Categorical(s1, ordered=True)
        c2 = pd.Categorical(s2, ordered=True)
        # Single item array
        res = c1.searchsorted(['bread'])
        chk = s1.searchsorted(['bread'])
        exp = np.array([1])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Scalar version of single item array
        # Categorical return np.array like pd.Series, but different from np.array.searchsorted()
        res = c1.searchsorted('bread')
        chk = s1.searchsorted('bread')
        exp = np.array([1])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Searching for a value that is not present in the Categorical
        res = c1.searchsorted(['bread', 'eggs'])
        chk = s1.searchsorted(['bread', 'eggs'])
        exp = np.array([1, 4])
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # Searching for a value that is not present, to the right
        res = c1.searchsorted(['bread', 'eggs'], side='right')
        chk = s1.searchsorted(['bread', 'eggs'], side='right')
        exp = np.array([3, 4]) # eggs before milk
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
        # As above, but with a sorter array to reorder an unsorted array
        res = c2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
        chk = s2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
        exp = np.array([3, 5]) # eggs after donuts, after switching milk and donuts
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)
    def test_deprecated_labels(self):
        # TODO: labels is deprecated and should be removed in 0.18 or 2017, whatever is earlier
        # the deprecated .labels alias must still return the codes, with a warning
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        exp = cat.codes
        with tm.assert_produces_warning(FutureWarning):
            res = cat.labels
        self.assert_numpy_array_equal(res, exp)
        # deliberate time-bomb: fails once pandas reaches 0.18 so the
        # deprecated attribute actually gets removed
        self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
    def test_deprecated_levels(self):
        # TODO: levels is deprecated and should be removed in 0.18 or 2017, whatever is earlier
        # the deprecated .levels alias must still return the categories
        cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
        exp = cat.categories
        with tm.assert_produces_warning(FutureWarning):
            res = cat.levels
        self.assert_numpy_array_equal(res, exp)
        # the deprecated `levels` constructor keyword maps to `categories`
        with tm.assert_produces_warning(FutureWarning):
            res = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
        self.assert_numpy_array_equal(res.categories, exp)
        # deliberate time-bomb: fails once pandas reaches 0.18
        self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
    def test_removed_names_produces_warning(self):
        # 10482
        # the removed `name` argument is ignored but must warn the user
        with tm.assert_produces_warning(UserWarning):
            Categorical([0,1], name="a")
        with tm.assert_produces_warning(UserWarning):
            Categorical.from_codes([1,2], ["a","b","c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
    def test_comparison_with_unknown_scalars(self):
        # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
        # comparisons with scalars not in categories should raise for unequal comps, but not for
        # equal/not equal
        cat = pd.Categorical([1, 2, 3], ordered=True)
        # ordering comparisons against a value outside the categories raise
        self.assertRaises(TypeError, lambda: cat < 4)
        self.assertRaises(TypeError, lambda: cat > 4)
        self.assertRaises(TypeError, lambda: 4 < cat)
        self.assertRaises(TypeError, lambda: 4 > cat)
        # equality comparisons are always allowed and simply never match
        self.assert_numpy_array_equal(cat == 4 , [False, False, False])
        self.assert_numpy_array_equal(cat != 4 , [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
    def setUp(self):
        # a small categorical factor used directly by the basic tests ...
        self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
                                              'a', 'c', 'c', 'c'])
        # ... and a DataFrame whose 'value_group' column is a categorical
        # built by binning random integers (used by e.g. test_describe)
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
        self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat','obj','num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False,True,False],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False,False,True],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True,False,False],index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000) ])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300) ])
self.assertTrue(result.codes.dtype == 'int8')
    def test_basic(self):
        # test basic creation / coercion of categoricals
        s = Series(self.factor, name='A')
        self.assertEqual(s.dtype,'category')
        self.assertEqual(len(s),len(self.factor))
        # reprs must not raise
        str(s.values)
        str(s)
        # in a frame
        df = DataFrame({'A' : self.factor })
        result = df['A']
        tm.assert_series_equal(result,s)
        result = df.iloc[:,0]
        tm.assert_series_equal(result,s)
        self.assertEqual(len(df),len(self.factor))
        str(df.values)
        str(df)
        # frame built from the Series rather than the raw Categorical
        df = DataFrame({'A' : s })
        result = df['A']
        tm.assert_series_equal(result,s)
        self.assertEqual(len(df),len(self.factor))
        str(df.values)
        str(df)
        # multiples
        df = DataFrame({'A' : s, 'B' : s, 'C' : 1})
        result1 = df['A']
        result2 = df['B']
        tm.assert_series_equal(result1, s)
        # 'B' has the same values as s but a different name
        tm.assert_series_equal(result2, s, check_names=False)
        self.assertEqual(result2.name, 'B')
        self.assertEqual(len(df),len(self.factor))
        str(df.values)
        str(df)
        # GH8623
        x = pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
                         columns=['person_id','person_name'])
        x['person_name'] = pd.Categorical(x.person_name) # doing this breaks transform
        # scalar access via iloc / [] / loc must all return the raw value
        expected = x.iloc[0].person_name
        result = x.person_name.iloc[0]
        self.assertEqual(result,expected)
        result = x.person_name[0]
        self.assertEqual(result,expected)
        result = x.person_name.loc[0]
        self.assertEqual(result,expected)
    def test_creation_astype(self):
        # astype('category') on a Series must match constructing the
        # Categorical directly
        l = ["a","b","c","a"]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l))
        res = s.astype('category')
        tm.assert_series_equal(res, exp)
        # same for integer values
        l = [1,2,3,1]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l))
        res = s.astype('category')
        tm.assert_series_equal(res, exp)
        # column-wise astype inside a DataFrame
        df = pd.DataFrame({"cats":[1,2,3,4,5,6], "vals":[1,2,3,4,5,6]})
        cats = Categorical([1,2,3,4,5,6])
        exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
        df["cats"] = df["cats"].astype("category")
        tm.assert_frame_equal(exp_df, df)
        df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]})
        cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
        exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
        df["cats"] = df["cats"].astype("category")
        tm.assert_frame_equal(exp_df, df)
        # with keywords
        l = ["a","b","c","a"]
        s = pd.Series(l)
        exp = pd.Series(Categorical(l, ordered=True))
        res = s.astype('category', ordered=True)
        tm.assert_series_equal(res, exp)
        exp = pd.Series(Categorical(l, categories=list('abcdef'), ordered=True))
        res = s.astype('category', categories=list('abcdef'), ordered=True)
        tm.assert_series_equal(res, exp)
    def test_construction_series(self):
        # Series(..., dtype='category') must equal Series(...).astype('category')
        l = [1,2,3,1]
        exp = Series(l).astype('category')
        res = Series(l,dtype='category')
        tm.assert_series_equal(res, exp)
        l = ["a","b","c","a"]
        exp = Series(l).astype('category')
        res = Series(l,dtype='category')
        tm.assert_series_equal(res, exp)
        # insert into frame with different index
        # GH 8076
        # a non-matching index yields all-NaN values but keeps the categories
        index = pd.date_range('20000101', periods=3)
        expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
        expected.index = index
        expected = DataFrame({'x': expected})
        df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index)
        tm.assert_frame_equal(df, expected)
    def test_construction_frame(self):
        # GH8626
        # dict creation
        df = DataFrame({ 'A' : list('abc') }, dtype='category')
        expected = Series(list('abc'), dtype='category', name='A')
        tm.assert_series_equal(df['A'], expected)
        # to_frame
        s = Series(list('abc'), dtype='category')
        result = s.to_frame()
        expected = Series(list('abc'), dtype='category', name=0)
        tm.assert_series_equal(result[0], expected)
        result = s.to_frame(name='foo')
        expected = Series(list('abc'), dtype='category', name='foo')
        tm.assert_series_equal(result['foo'], expected)
        # list-like creation
        df = DataFrame(list('abc'), dtype='category')
        expected = Series(list('abc'), dtype='category', name=0)
        tm.assert_series_equal(df[0], expected)
        # ndim != 1
        # a list of Categoricals becomes one categorical column per entry
        df = DataFrame([pd.Categorical(list('abc'))])
        expected = DataFrame({ 0 : Series(list('abc'),dtype='category')})
        tm.assert_frame_equal(df,expected)
        df = DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abd'))])
        expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
                               1 : Series(list('abd'),dtype='category')},columns=[0,1])
        tm.assert_frame_equal(df,expected)
        # mixed
        df = DataFrame([pd.Categorical(list('abc')),list('def')])
        expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
                               1 : list('def')},columns=[0,1])
        tm.assert_frame_equal(df,expected)
        # invalid (shape)
        self.assertRaises(ValueError, lambda : DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abdefg'))]))
        # ndim > 1
        self.assertRaises(NotImplementedError, lambda : pd.Categorical(np.array([list('abcd')])))
    def test_reshaping(self):
        # unstacking a categorical Panel column must keep the category dtype
        # in every resulting column
        p = tm.makePanel()
        p['str'] = 'foo'
        df = p.to_frame()
        df['category'] = df['str'].astype('category')
        result = df['category'].unstack()
        c = Categorical(['foo']*len(p.major_axis))
        expected = DataFrame({'A' : c.copy(),
                              'B' : c.copy(),
                              'C' : c.copy(),
                              'D' : c.copy()},
                             columns=Index(list('ABCD'),name='minor'),
                             index=p.major_axis.set_names('major'))
        tm.assert_frame_equal(result, expected)
    def test_reindex(self):
        index = pd.date_range('20000101', periods=3)
        # reindexing to an invalid Categorical
        # (no label overlap -> all values NaN, categories preserved)
        s = Series(['a', 'b', 'c'],dtype='category')
        result = s.reindex(index)
        expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
        expected.index = index
        tm.assert_series_equal(result, expected)
        # partial reindexing
        expected = Series(Categorical(values=['b','c'],categories=['a', 'b', 'c']))
        expected.index = [1,2]
        result = s.reindex([1,2])
        tm.assert_series_equal(result, expected)
        # missing labels become NaN while categories stay intact
        expected = Series(Categorical(values=['c',np.nan],categories=['a', 'b', 'c']))
        expected.index = [2,3]
        result = s.reindex([2,3])
        tm.assert_series_equal(result, expected)
    def test_sideeffects_free(self):
        # Passing a categorical to a Series and then changing values in either the series or the
        # categorical should not change the values in the other one, IF you specify copy!
        cat = Categorical(["a","b","c","a"])
        s = pd.Series(cat, copy=True)
        self.assertFalse(s.cat is cat)
        s.cat.categories = [1,2,3]
        exp_s = np.array([1,2,3,1])
        exp_cat = np.array(["a","b","c","a"])
        self.assert_numpy_array_equal(s.__array__(), exp_s)
        self.assert_numpy_array_equal(cat.__array__(), exp_cat)
        # setting a value on the copied Series must not touch the original
        s[0] = 2
        exp_s2 = np.array([2,2,3,1])
        self.assert_numpy_array_equal(s.__array__(), exp_s2)
        self.assert_numpy_array_equal(cat.__array__(), exp_cat)
        # however, copy is False by default
        # so this WILL change values
        cat = Categorical(["a","b","c","a"])
        s = pd.Series(cat)
        self.assertTrue(s.values is cat)
        s.cat.categories = [1,2,3]
        exp_s = np.array([1,2,3,1])
        self.assert_numpy_array_equal(s.__array__(), exp_s)
        self.assert_numpy_array_equal(cat.__array__(), exp_s)
        # with the shared backing store the mutation shows up in both
        s[0] = 2
        exp_s2 = np.array([2,2,3,1])
        self.assert_numpy_array_equal(s.__array__(), exp_s2)
        self.assert_numpy_array_equal(cat.__array__(), exp_s2)
    def test_nan_handling(self):
        # Nans are represented as -1 in labels
        s = Series(Categorical(["a","b",np.nan,"a"]))
        self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
        self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))
        # If categories have nan included, the label should point to that instead
        # (NaN-in-categories is deprecated, hence the FutureWarning)
        with tm.assert_produces_warning(FutureWarning):
            s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]))
        self.assert_numpy_array_equal(s2.cat.categories,
                                      np.array(["a","b",np.nan], dtype=np.object_))
        self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))
        # Changing categories should also make the replaced category np.nan
        s3 = Series(Categorical(["a","b","c","a"]))
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            s3.cat.categories = ["a","b",np.nan]
        self.assert_numpy_array_equal(s3.cat.categories,
                                      np.array(["a","b",np.nan], dtype=np.object_))
        self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))
def test_cat_accessor(self):
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result,expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(s)
    def test_series_delegations(self):
        # invalid accessor
        # .cat is only available on Series with category dtype
        self.assertRaises(AttributeError, lambda : Series([1,2,3]).cat)
        tm.assertRaisesRegexp(AttributeError,
                              r"Can only use .cat accessor with a 'category' dtype",
                              lambda : Series([1,2,3]).cat)
        self.assertRaises(AttributeError, lambda : Series(['a','b','c']).cat)
        self.assertRaises(AttributeError, lambda : Series(np.arange(5.)).cat)
        self.assertRaises(AttributeError, lambda : Series([Timestamp('20130101')]).cat)
        # Series should delegate calls to '.categories', '.codes', '.ordered' and the
        # methods '.set_categories()' 'drop_unused_categories()' to the categorical
        s = Series(Categorical(["a","b","c","a"], ordered=True))
        exp_categories = np.array(["a","b","c"])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        # categories are settable through the accessor
        s.cat.categories = [1,2,3]
        exp_categories = np.array([1,2,3])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        exp_codes = Series([0,1,2,0],dtype='int8')
        tm.assert_series_equal(s.cat.codes, exp_codes)
        # ordered flag round-trips through as_unordered / as_ordered
        self.assertEqual(s.cat.ordered, True)
        s = s.cat.as_unordered()
        self.assertEqual(s.cat.ordered, False)
        s.cat.as_ordered(inplace=True)
        self.assertEqual(s.cat.ordered, True)
        # reorder
        s = Series(Categorical(["a","b","c","a"], ordered=True))
        exp_categories = np.array(["c","b","a"])
        exp_values = np.array(["a","b","c","a"])
        s = s.cat.set_categories(["c","b","a"])
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        self.assert_numpy_array_equal(s.values.__array__(), exp_values)
        self.assert_numpy_array_equal(s.__array__(), exp_values)
        # remove unused categories
        s = Series(Categorical(["a","b","b","a"], categories=["a","b","c"]))
        exp_categories = np.array(["a","b"])
        exp_values = np.array(["a","b","b","a"])
        s = s.cat.remove_unused_categories()
        self.assert_numpy_array_equal(s.cat.categories, exp_categories)
        self.assert_numpy_array_equal(s.values.__array__(), exp_values)
        self.assert_numpy_array_equal(s.__array__(), exp_values)
        # This method is likely to be confused, so test that it raises an error on wrong inputs:
        def f():
            s.set_categories([4,3,2,1])
        self.assertRaises(Exception, f)
        # right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
    def test_assignment_to_dataframe(self):
        # assignment
        # assigning either a raw Categorical or a categorical Series to a
        # DataFrame column must produce a 'category' dtype column
        df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')})
        labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
        df = df.sort_values(by=['value'], ascending=True)
        s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
        d = s.values
        df['D'] = d
        str(df)
        result = df.dtypes
        expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D'])
        tm.assert_series_equal(result,expected)
        df['E'] = s
        str(df)
        result = df.dtypes
        expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()],
                          index=['value','D','E'])
        tm.assert_series_equal(result,expected)
        result1 = df['D']
        result2 = df['E']
        # the stored block values are the original categorical
        self.assertTrue(result1._data._block.values.equals(d))
        # sorting
        s.name = 'E'
        self.assertTrue(result2.sort_index().equals(s.sort_index()))
        # smoke test: a DataFrame from a categorical Series must construct
        cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10])
        df = pd.DataFrame(pd.Series(cat))
    def test_describe(self):
        # Categoricals should not show up together with numerical columns
        result = self.cat.describe()
        self.assertEqual(len(result.columns),1)
        # In a frame, describe() for the cat should be the same as for string arrays (count, unique,
        # top, freq)
        cat = Categorical(["a","b","b","b"], categories=['a','b','c'], ordered=True)
        s = Series(cat)
        result = s.describe()
        expected = Series([4,2,"b",3],index=['count','unique','top', 'freq'])
        tm.assert_series_equal(result,expected)
        # describe() of a categorical column must equal describe() of the
        # same data held as plain strings
        cat = pd.Series(pd.Categorical(["a","b","c","c"]))
        df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]})
        res = df3.describe()
        self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
    def test_repr(self):
        # repr of a categorical Series includes the dtype line and the
        # Categories footer
        a = pd.Series(pd.Categorical([1,2,3,4]))
        exp = u("0    1\n1    2\n2    3\n3    4\n" +
                "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
        self.assertEqual(exp, a.__unicode__())
        # long Series get truncated under display.max_rows
        a = pd.Series(pd.Categorical(["a","b"] *25))
        exp = u("0    a\n1    b\n" + "     ..\n" +
                "48   a\n49   b\n" +
                "dtype: category\nCategories (2, object): [a, b]")
        with option_context("display.max_rows", 5):
            self.assertEqual(exp, repr(a))
        # many ordered categories are elided with "..." and use "<" separators
        levs = list("abcdefghijklmnopqrstuvwxyz")
        a = pd.Series(pd.Categorical(["a","b"], categories=levs, ordered=True))
        exp = u("0    a\n1    b\n" +
                "dtype: category\n"
                "Categories (26, object): [a < b < c < d ... w < x < y < z]")
        self.assertEqual(exp,a.__unicode__())
    def test_categorical_repr(self):
        # repr of an unordered Categorical: values line, optional Length
        # line for long data, Categories footer with comma separators
        c = pd.Categorical([1, 2 ,3])
        exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
        self.assertEqual(repr(c), exp)
        c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3])
        exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
        self.assertEqual(repr(c), exp)
        # long values are truncated and a Length line is added
        c = pd.Categorical([1, 2, 3, 4, 5] * 10)
        exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
        self.assertEqual(repr(c), exp)
        # many categories are elided with "..." in the footer
        c = pd.Categorical(np.arange(20))
        exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
        self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
    """Same as test_categorical_repr, but ordered categoricals join the
    category listing with ' < ' instead of ', '."""
    c = pd.Categorical([1, 2 ,3], ordered=True)
    exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3], ordered=True)
    exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
    self.assertEqual(repr(c), exp)
    # long input: values elided plus Length line
    c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
    exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
    self.assertEqual(repr(c), exp)
    # many categories: listing elided with '...'
    c = pd.Categorical(np.arange(20), ordered=True)
    exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
    """repr of datetime-valued Categoricals, naive and tz-aware; the
    category listing wraps across lines aligned under its bracket."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    c = pd.Categorical(idx)
    exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
    self.assertEqual(repr(c), exp)
    # duplicated values, explicit categories: values repeated, categories not
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
    self.assertEqual(repr(c), exp)
    # tz-aware: dtype shows the timezone, values carry the UTC offset
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    c = pd.Categorical(idx)
    exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n                                             2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
                                             2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
    """Ordered variant of test_categorical_repr_datetime: category listing
    joined with ' < '."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    c = pd.Categorical(idx, ordered=True)
    exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
    self.assertEqual(repr(c), exp)
    # tz-aware
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    c = pd.Categorical(idx, ordered=True)
    exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
    """repr of period-valued Categoricals (hourly and monthly freq)."""
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    c = pd.Categorical(idx)
    exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
    self.assertEqual(repr(c), exp)
    # monthly frequency: shorter labels fit on a single line
    idx = pd.period_range('2011-01', freq='M', periods=5)
    c = pd.Categorical(idx)
    exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
    """Ordered variant of test_categorical_repr_period ('<' separators)."""
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    c = pd.Categorical(idx, ordered=True)
    exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
    self.assertEqual(repr(c), exp)
    # monthly frequency
    idx = pd.period_range('2011-01', freq='M', periods=5)
    c = pd.Categorical(idx, ordered=True)
    exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
    """repr of timedelta-valued Categoricals, short and elided forms."""
    idx = pd.timedelta_range('1 days', periods=5)
    c = pd.Categorical(idx)
    exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
    self.assertEqual(repr(c), exp)
    # sub-day resolution and >10 categories: values and categories elided
    idx = pd.timedelta_range('1 hours', periods=20)
    c = pd.Categorical(idx)
    exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
                                   18 days 01:00:00, 19 days 01:00:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx)
    exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
                                   18 days 01:00:00, 19 days 01:00:00]"""
    self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
    """Ordered variant of test_categorical_repr_timedelta ('<' separators)."""
    idx = pd.timedelta_range('1 days', periods=5)
    c = pd.Categorical(idx, ordered=True)
    exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
    self.assertEqual(repr(c), exp)
    # >10 categories: listing elided with '...'
    idx = pd.timedelta_range('1 hours', periods=20)
    c = pd.Categorical(idx, ordered=True)
    exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
                                   18 days 01:00:00 < 19 days 01:00:00]"""
    self.assertEqual(repr(c), exp)
    c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
    exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
                                   18 days 01:00:00 < 19 days 01:00:00]"""
    self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
    """Series repr with int categorical values; long category lists elided."""
    s = pd.Series(pd.Categorical([1, 2 ,3]))
    exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1, 2, 3]"""
    self.assertEqual(repr(s), exp)
    s = pd.Series(pd.Categorical(np.arange(10)))
    exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
    """Ordered variant of test_categorical_series_repr ('<' separators)."""
    s = pd.Series(pd.Categorical([1, 2 ,3], ordered=True))
    exp = """0    1
1    2
2    3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
    self.assertEqual(repr(s), exp)
    s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
    exp = """0    0
1    1
2    2
3    3
4    4
5    5
6    6
7    7
8    8
9    9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
    """Series repr for datetime categoricals, naive and tz-aware."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    s = pd.Series(pd.Categorical(idx))
    exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
                                 2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
    self.assertEqual(repr(s), exp)
    # tz-aware: dtype carries the timezone
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    s = pd.Series(pd.Categorical(idx))
    exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
                                             2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
    """Ordered variant of test_categorical_series_repr_datetime."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   2011-01-01 09:00:00
1   2011-01-01 10:00:00
2   2011-01-01 11:00:00
3   2011-01-01 12:00:00
4   2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
                                 2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
    self.assertEqual(repr(s), exp)
    # tz-aware
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   2011-01-01 09:00:00-05:00
1   2011-01-01 10:00:00-05:00
2   2011-01-01 11:00:00-05:00
3   2011-01-01 12:00:00-05:00
4   2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
                                             2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
                                             2011-01-01 13:00:00-05:00]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
    """Series repr for period categoricals (hourly and monthly freq)."""
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    s = pd.Series(pd.Categorical(idx))
    exp = """0   2011-01-01 09:00
1   2011-01-01 10:00
2   2011-01-01 11:00
3   2011-01-01 12:00
4   2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
                         2011-01-01 13:00]"""
    self.assertEqual(repr(s), exp)
    # monthly frequency
    idx = pd.period_range('2011-01', freq='M', periods=5)
    s = pd.Series(pd.Categorical(idx))
    exp = """0   2011-01
1   2011-02
2   2011-03
3   2011-04
4   2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
    """Ordered variant of test_categorical_series_repr_period."""
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   2011-01-01 09:00
1   2011-01-01 10:00
2   2011-01-01 11:00
3   2011-01-01 12:00
4   2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
                         2011-01-01 13:00]"""
    self.assertEqual(repr(s), exp)
    # monthly frequency
    idx = pd.period_range('2011-01', freq='M', periods=5)
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   2011-01
1   2011-02
2   2011-03
3   2011-04
4   2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
    """Series repr for timedelta categoricals; long listings elided."""
    idx = pd.timedelta_range('1 days', periods=5)
    s = pd.Series(pd.Categorical(idx))
    exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
    self.assertEqual(repr(s), exp)
    # sub-day resolution: full 'D days HH:MM:SS' display
    idx = pd.timedelta_range('1 hours', periods=10)
    s = pd.Series(pd.Categorical(idx))
    exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
                                   3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
                                   8 days 01:00:00, 9 days 01:00:00]"""
    self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
    """Ordered variant of test_categorical_series_repr_timedelta."""
    idx = pd.timedelta_range('1 days', periods=5)
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   1 days
1   2 days
2   3 days
3   4 days
4   5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
    self.assertEqual(repr(s), exp)
    # sub-day resolution; category listing elided with '...'
    idx = pd.timedelta_range('1 hours', periods=10)
    s = pd.Series(pd.Categorical(idx, ordered=True))
    exp = """0   0 days 01:00:00
1   1 days 01:00:00
2   2 days 01:00:00
3   3 days 01:00:00
4   4 days 01:00:00
5   5 days 01:00:00
6   6 days 01:00:00
7   7 days 01:00:00
8   8 days 01:00:00
9   9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
                                   3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
                                   8 days 01:00:00 < 9 days 01:00:00]"""
    self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
    """CategoricalIndex repr: categories listing elided past 8 entries."""
    idx = pd.CategoricalIndex(pd.Categorical([1, 2 ,3]))
    exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
    self.assertEqual(repr(idx), exp)
    i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
    exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
    """Ordered variant: repr reports ordered=True."""
    i = pd.CategoricalIndex(pd.Categorical([1, 2 ,3], ordered=True))
    exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
    exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
    """CategoricalIndex repr for datetime categories, naive and tz-aware;
    values wrap aligned under the opening bracket."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                  '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                  '2011-01-01 13:00:00'],
                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # tz-aware
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
    """Ordered variant of test_categorical_index_repr_datetime, including
    an index with duplicated values."""
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
                  '2011-01-01 11:00:00', '2011-01-01 12:00:00',
                  '2011-01-01 13:00:00'],
                 categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # tz-aware
    idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # duplicated values: values repeated, categories deduplicated
    i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
    exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
                  '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
                  '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
                  '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
                  '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
                 categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
    """CategoricalIndex repr for period categories across lengths 1..5,
    duplicated values, and monthly frequency."""
    # test all length
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # 5 entries no longer fit on one line -> wrapped values
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # duplicated values
    i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
    exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
                  '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
                  '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # monthly frequency: short labels fit on one line
    idx = pd.period_range('2011-01', freq='M', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
    """Ordered variant: repr reports ordered=True for period categories."""
    idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
                  '2011-01-01 12:00', '2011-01-01 13:00'],
                 categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    idx = pd.period_range('2011-01', freq='M', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
    """CategoricalIndex repr for timedelta categories; note categories=[]
    shows full 'D days HH:MM:SS' even when values show the short form."""
    idx = pd.timedelta_range('1 days', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
    # >8 categories: categories listing elided with '...'
    idx = pd.timedelta_range('1 hours', periods=10)
    i = pd.CategoricalIndex(pd.Categorical(idx))
    exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
                  '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
                  '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
                  '9 days 01:00:00'],
                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
    """Ordered variant of test_categorical_index_repr_timedelta."""
    idx = pd.timedelta_range('1 days', periods=5)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
    idx = pd.timedelta_range('1 hours', periods=10)
    i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
    exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
                  '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
                  '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
                  '9 days 01:00:00'],
                 categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
    self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
    """A frame of categorical datetime/period columns must render exactly
    like the same frame built from the raw (non-categorical) indexes."""
    # normal DataFrame
    dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern')
    p = pd.period_range('2011-01', freq='M', periods=5)
    df = pd.DataFrame({'dt': dt, 'p': p})
    exp = """                         dt       p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
    # NOTE: df is deliberately rebuilt with Categorical columns and compared
    # against the expected repr of the plain frame above
    df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
    self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({ 'int64' : np.random.randint(100,size=n) })
df['category'] = Series(np.array(list('abcdefghij')).take(np.random.randint(0,10,size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category']=='d']
df2.info()
def test_groupby_sort(self):
    """Counts grouped on a categorical column come back already ordered,
    so a bar plot gets a sorted x axis."""
    # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
    #self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
    res = self.cat.groupby(['value_group'])['value_group'].count()
    # reorder by the numeric prefix of each label and expect no change
    ordered_labels = sorted(res.index, key=lambda lbl: float(lbl.split()[0]))
    exp = res[ordered_labels]
    exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
    tm.assert_series_equal(res, exp)
def test_min_max(self):
    """min/max on categorical Series: TypeError when unordered, category
    order (not lexical order) when ordered, and NaN propagating to min."""
    # unordered categoricals refuse to compute an extremum
    unordered = Series(Categorical(["a","b","c","d"], ordered=False))
    self.assertRaises(TypeError, lambda: unordered.min())
    self.assertRaises(TypeError, lambda: unordered.max())
    # default category order follows the values
    cat = Series(Categorical(["a","b","c","d"], ordered=True))
    self.assertEqual(cat.min(), "a")
    self.assertEqual(cat.max(), "d")
    # reversed explicit categories flip the extrema
    cat = Series(Categorical(["a","b","c","d"],
                             categories=['d','c','b','a'], ordered=True))
    self.assertEqual(cat.min(), "d")
    self.assertEqual(cat.max(), "a")
    # NaN wins min but is skipped for max
    cat = Series(Categorical([np.nan,"b","c",np.nan],
                             categories=['d','c','b','a'], ordered=True))
    self.assertTrue(np.isnan(cat.min()))
    self.assertEqual(cat.max(), "b")
    # same with numeric categories
    cat = Series(Categorical([np.nan,1,2,np.nan],
                             categories=[5,4,3,2,1], ordered=True))
    self.assertTrue(np.isnan(cat.min()))
    self.assertEqual(cat.max(), 1)
def test_mode(self):
    """Series.mode on ordered categoricals keeps dtype and categories;
    ties yield all modal values, no repeats yield an empty result."""
    # single mode
    s = Series(Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
    expected = Series(Categorical([5], categories=[5,4,3,2,1], ordered=True))
    tm.assert_series_equal(s.mode(), expected)
    # tie between 1 and 5: both reported
    s = Series(Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
    expected = Series(Categorical([5,1], categories=[5,4,3,2,1], ordered=True))
    tm.assert_series_equal(s.mode(), expected)
    # every value unique: empty mode, categories preserved
    s = Series(Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True))
    expected = Series(Categorical([], categories=[5,4,3,2,1], ordered=True))
    tm.assert_series_equal(s.mode(), expected)
def test_value_counts(self):
    """value_counts on a categorical reports zero-count categories and
    honours the sort flag."""
    s = pd.Series(pd.Categorical(["a","b","c","c","c","b"],
                                 categories=["c","a","b","d"]))
    # unsorted: declared category order, zeros included
    unsorted_counts = s.value_counts(sort=False)
    tm.assert_series_equal(
        unsorted_counts,
        Series([3,1,2,0], index=pd.CategoricalIndex(["c","a","b","d"])))
    # sorted: descending by count
    sorted_counts = s.value_counts(sort=True)
    tm.assert_series_equal(
        sorted_counts,
        Series([3,2,1,0], index=pd.CategoricalIndex(["c","b","a","d"])))
def test_value_counts_with_nan(self):
    """value_counts interaction with NaN values, dropna, and the
    (deprecated, FutureWarning) NaN-as-category case. See GH 9443."""
    # https://github.com/pydata/pandas/issues/9443
    # no missing values: dropna makes no difference
    s = pd.Series(["a", "b", "a"], dtype="category")
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    # missing values present: dropna=False counts them as np.nan
    s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
    # When we aren't sorting by counts, and np.nan isn't a
    # category, it should be last.
    tm.assert_series_equal(
        s.value_counts(dropna=False, sort=False),
        pd.Series([2, 1, 3], index=pd.CategoricalIndex(["a", "b", np.nan])))
    # NaN as an explicit category is deprecated -> FutureWarning
    with tm.assert_produces_warning(FutureWarning):
        s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan]))
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    # NaN category with no NaN values still shows a zero count
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([2, 1, 0], index=pd.CategoricalIndex(["a", "b", np.nan])))
    with tm.assert_produces_warning(FutureWarning):
        s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None],
                                     categories=["a", "b", np.nan]))
    tm.assert_series_equal(
        s.value_counts(dropna=True),
        pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
    tm.assert_series_equal(
        s.value_counts(dropna=False),
        pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
    """GroupBy over categorical keys: the result index carries every
    declared category (NaN-filled where empty) for a single key, multiple
    keys, and keys mixed with a non-categorical column; plus transform /
    filter / apply on pd.cut groupers (GH 8623, GH 9921, GH 9603)."""
    cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True)
    data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats})
    # unused category 'd' still appears in the result, as NaN
    expected = DataFrame({'a': Series([1, 2, 4, np.nan],
                          index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b'))})
    result = data.groupby("b").mean()
    tm.assert_frame_equal(result, expected)
    raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True)
    raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True)
    df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]})
    # single grouper
    gb = df.groupby("A")
    exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
    expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
    result = gb.sum()
    tm.assert_frame_equal(result, expected)
    # multiple groupers: full cartesian product of both category sets
    gb = df.groupby(['A','B'])
    expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],
                           index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) })
    result = gb.sum()
    tm.assert_frame_equal(result, expected)
    # multiple groupers with a non-cat
    df = df.copy()
    df['C'] = ['foo','bar']*2
    gb = df.groupby(['A','B','C'])
    expected = DataFrame({ 'values' :
                           Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'],
                                                                           ['c','d','y'],
                                                                           ['foo','bar']],
                                                                          names=['A','B','C']))
                           }).sortlevel()
    expected.iloc[[1,2,7,8],0] = [1,2,3,4]
    result = gb.sum()
    tm.assert_frame_equal(result, expected)
    # GH 8623
    x=pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
                   columns=['person_id','person_name'])
    x['person_name'] = pd.Categorical(x.person_name)
    g = x.groupby(['person_id'])
    # identity transform keeps the categorical column intact
    result = g.transform(lambda x:x)
    tm.assert_frame_equal(result, x[['person_name']])
    result = x.drop_duplicates('person_name')
    expected = x.iloc[[0,1]]
    tm.assert_frame_equal(result, expected)
    def f(x):
        return x.drop_duplicates('person_name').iloc[0]
    result = g.apply(f)
    expected = x.iloc[[0,1]].copy()
    expected.index = Index([1,2],name='person_id')
    # apply() of a row-reducer loses the categorical dtype -> object
    expected['person_name'] = expected['person_name'].astype('object')
    tm.assert_frame_equal(result, expected)
    # GH 9921
    # Monotonic
    df = DataFrame({"a": [5, 15, 25]})
    c = pd.cut(df.a, bins=[0,10,20,30,40])
    # each bin holds exactly one row, so transform(sum) is the identity
    result = df.a.groupby(c).transform(sum)
    tm.assert_series_equal(result, df['a'], check_names=False)
    self.assertTrue(result.name is None)
    tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
    tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
    tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
    # Filter
    tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
    tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
    # Non-monotonic
    df = DataFrame({"a": [5, 15, 25, -5]})
    c = pd.cut(df.a, bins=[-10, 0,10,20,30,40])
    result = df.a.groupby(c).transform(sum)
    tm.assert_series_equal(result, df['a'], check_names=False)
    self.assertTrue(result.name is None)
    tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
    tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
    tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
    # GH 9603
    df = pd.DataFrame({'a': [1, 0, 0, 0]})
    c = pd.cut(df.a, [0, 1, 2, 3, 4])
    # apply over a cut grouper: empty bins reported with length 0
    result = df.groupby(c).apply(len)
    expected = pd.Series([1, 0, 0, 0], index=pd.CategoricalIndex(c.values.categories))
    expected.index.name = 'a'
    tm.assert_series_equal(result, expected)
def test_pivot_table(self):
    """Pivoting on two ordered categorical keys yields the full category product."""
    cat_a = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
    cat_b = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
    frame = DataFrame({"A": cat_a, "B": cat_b, "values": [1, 2, 3, 4]})

    result = pd.pivot_table(frame, values='values', index=['A', 'B'])

    # unused category combinations appear as NaN rows in the result
    full_index = pd.MultiIndex.from_product([['a', 'b', 'z'], ['c', 'd', 'y']],
                                            names=['A', 'B'])
    expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
                      index=full_index, name='values')
    tm.assert_series_equal(result, expected)
def test_count(self):
    """Series.count() skips missing values in categorical-backed data."""
    values = Categorical([np.nan, 1, 2, np.nan],
                         categories=[5, 4, 3, 2, 1], ordered=True)
    # two of the four entries are NaN, so count() sees only two
    self.assertEqual(Series(values).count(), 2)
def test_sort(self):
    """Sorting of Categoricals and of Series/DataFrames containing them.

    Covers the deprecated ``.order()`` (GH 9816), ``sort_values`` on
    ordered/unordered categoricals, DataFrame multi-column sorts honouring
    category order (GH 7848), and NaN placement via ``na_position``.
    """
    c = Categorical(["a","b","b","a"], ordered=False)
    cat = Series(c)

    # 9816 deprecated: .order() must still work but warn
    with tm.assert_produces_warning(FutureWarning):
        c.order()

    # sort in the categories order
    expected = Series(Categorical(["a","a","b","b"], ordered=False),index=[0,3,1,2])
    result = cat.sort_values()
    tm.assert_series_equal(result, expected)

    # ordered categorical with inferred categories sorts lexically
    cat = Series(Categorical(["a","c","b","d"], ordered=True))
    res = cat.sort_values()
    exp = np.array(["a","b","c","d"])
    self.assert_numpy_array_equal(res.__array__(), exp)

    # explicit categories in natural order: same result, both directions
    cat = Series(Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True))
    res = cat.sort_values()
    exp = np.array(["a","b","c","d"])
    self.assert_numpy_array_equal(res.__array__(), exp)
    res = cat.sort_values(ascending=False)
    exp = np.array(["d","c","b","a"])
    self.assert_numpy_array_equal(res.__array__(), exp)

    # raw_cat2 has reversed categories, so sorting by it inverts the row order
    raw_cat1 = Categorical(["a","b","c","d"], categories=["a","b","c","d"], ordered=False)
    raw_cat2 = Categorical(["a","b","c","d"], categories=["d","c","b","a"], ordered=True)
    s = ["a","b","c","d"]
    df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]})

    # Cats must be sorted in a dataframe
    res = df.sort_values(by=["string"], ascending=False)
    exp = np.array(["d", "c", "b", "a"])
    self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
    self.assertEqual(res["sort"].dtype, "category")

    res = df.sort_values(by=["sort"], ascending=False)
    exp = df.sort_values(by=["string"], ascending=True)
    self.assert_numpy_array_equal(res["values"], exp["values"])
    self.assertEqual(res["sort"].dtype, "category")
    self.assertEqual(res["unsort"].dtype, "category")

    # unordered cat, but we allow this (sorting by an unordered categorical)
    df.sort_values(by=["unsort"], ascending=False)

    # multi-columns sort
    # GH 7848
    df = DataFrame({"id":[6,5,4,3,2,1], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
    df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
    df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])

    # sorts 'grade' according to the order of the categories
    result = df.sort_values(by=['grade'])
    expected = df.iloc[[1,2,5,0,3,4]]
    tm.assert_frame_equal(result,expected)

    # multi: grade first, then id as tiebreaker
    result = df.sort_values(by=['grade', 'id'])
    expected = df.iloc[[2,1,5,4,3,0]]
    tm.assert_frame_equal(result,expected)

    # reverse: descending sort keeps the categories themselves unchanged
    cat = Categorical(["a","c","c","b","d"], ordered=True)
    res = cat.sort_values(ascending=False)
    exp_val = np.array(["d","c", "c", "b","a"],dtype=object)
    exp_categories = np.array(["a","b","c","d"],dtype=object)
    self.assert_numpy_array_equal(res.__array__(), exp_val)
    self.assert_numpy_array_equal(res.categories, exp_categories)

    # some NaN positions
    cat = Categorical(["a","c","b","d", np.nan], ordered=True)
    res = cat.sort_values(ascending=False, na_position='last')
    exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
    exp_categories = np.array(["a","b","c","d"],dtype=object)
    self.assert_numpy_array_equal(res.__array__(), exp_val)
    self.assert_numpy_array_equal(res.categories, exp_categories)

    cat = Categorical(["a","c","b","d", np.nan], ordered=True)
    res = cat.sort_values(ascending=False, na_position='first')
    exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
    exp_categories = np.array(["a","b","c","d"],dtype=object)
    self.assert_numpy_array_equal(res.__array__(), exp_val)
    self.assert_numpy_array_equal(res.categories, exp_categories)

    # NOTE(review): the next two checks duplicate the two above verbatim
    # (likely a copy-paste leftover); kept to preserve the test byte-for-byte
    cat = Categorical(["a","c","b","d", np.nan], ordered=True)
    res = cat.sort_values(ascending=False, na_position='first')
    exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
    exp_categories = np.array(["a","b","c","d"],dtype=object)
    self.assert_numpy_array_equal(res.__array__(), exp_val)
    self.assert_numpy_array_equal(res.categories, exp_categories)

    cat = Categorical(["a","c","b","d", np.nan], ordered=True)
    res = cat.sort_values(ascending=False, na_position='last')
    exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
    exp_categories = np.array(["a","b","c","d"],dtype=object)
    self.assert_numpy_array_equal(res.__array__(), exp_val)
    self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
    """Slicing Series/DataFrames with categorical data keeps values aligned."""
    cat = Series(Categorical([1, 2, 3, 4]))
    rev = cat[::-1]
    self.assert_numpy_array_equal(rev.__array__(), np.array([4, 3, 2, 1]))

    frame = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
    frame['D'] = pd.cut(frame.value, bins=[0, 25, 50, 75, 100])

    # single row via iloc: the cut label comes back as the interval string
    row = frame.iloc[10]
    tm.assert_series_equal(row, Series([11, '(0, 25]'], index=['value', 'D'], name=10))

    # a positional slice keeps both the values and the categorical column
    expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
                         index=np.arange(10, 20).astype('int64'))
    expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
    tm.assert_frame_equal(frame.iloc[10:20], expected)

    # single row via loc behaves the same way
    row = frame.loc[8]
    tm.assert_series_equal(row, Series([9, '(0, 25]'], index=['value', 'D'], name=8))
def test_slicing_and_getting_ops(self):
    """Exhaustive read-side indexing on a frame with a categorical column.

    For iloc/loc/ix/iat/at/fancy/get_value, check that a frame slice, a
    column, a row and a single value all come back with the right type:
    frames/columns keep the category dtype, rows are object Series, and
    scalar reads return the plain category value.
    """
    # systematically test the slicing operations:
    # for all slicing ops:
    # - returning a dataframe
    # - returning a column
    # - returning a row
    # - returning a single value
    cats = pd.Categorical(["a","c","b","c","c","c","c"], categories=["a","b","c"])
    idx = pd.Index(["h","i","j","k","l","m","n"])
    values= [1,2,3,4,5,6,7]
    df = pd.DataFrame({"cats":cats,"values":values}, index=idx)

    # the expected values
    cats2 = pd.Categorical(["b","c"], categories=["a","b","c"])
    idx2 = pd.Index(["j","k"])
    values2= [3,4]

    # 2:4,: | "j":"k",:
    exp_df = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)

    # :,"cats" | :,0
    exp_col = pd.Series(cats,index=idx,name='cats')

    # "j",: | 2,:
    exp_row = pd.Series(["b",3], index=["cats","values"], dtype="object", name="j")

    # "j","cats | 2,0
    exp_val = "b"

    # iloc
    # frame
    res_df = df.iloc[2:4,:]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.iloc[2,:]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.iloc[:,0]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.iloc[2,0]
    self.assertEqual(res_val, exp_val)

    # loc
    # frame
    res_df = df.loc["j":"k",:]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.loc["j",:]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.loc[:,"cats"]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.loc["j","cats"]
    self.assertEqual(res_val, exp_val)

    # ix
    # frame
    #res_df = df.ix["j":"k",[0,1]] # doesn't work?
    res_df = df.ix["j":"k",:]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    # row
    res_row = df.ix["j",:]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    # col
    res_col = df.ix[:,"cats"]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    # single value
    res_val = df.ix["j",0]
    self.assertEqual(res_val, exp_val)

    # iat
    res_val = df.iat[2,0]
    self.assertEqual(res_val, exp_val)

    # at
    res_val = df.at["j","cats"]
    self.assertEqual(res_val, exp_val)

    # fancy indexing: boolean masks on either column select the same row
    exp_fancy = df.iloc[[2]]
    res_fancy = df[df["cats"] == "b"]
    tm.assert_frame_equal(res_fancy,exp_fancy)
    res_fancy = df[df["values"] == 3]
    tm.assert_frame_equal(res_fancy,exp_fancy)

    # get_value
    res_val = df.get_value("j","cats")
    self.assertEqual(res_val, exp_val)

    # i : int, slice, or sequence of integers
    res_row = df.iloc[2]
    tm.assert_series_equal(res_row, exp_row)
    tm.assertIsInstance(res_row["cats"], compat.string_types)

    res_df = df.iloc[slice(2,4)]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_df = df.iloc[[2,3]]
    tm.assert_frame_equal(res_df, exp_df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_col = df.iloc[:,0]
    tm.assert_series_equal(res_col, exp_col)
    self.assertTrue(com.is_categorical_dtype(res_col))

    res_df = df.iloc[:,slice(0,2)]
    tm.assert_frame_equal(res_df, df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))

    res_df = df.iloc[:,[0,1]]
    tm.assert_frame_equal(res_df, df)
    self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
    """GH 7918: the slicing examples from the docs keep the category dtype."""
    #GH 7918
    cats = Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c"])
    idx = Index(["h","i","j","k","l","m","n",])
    values= [1,2,2,2,3,4,5]
    df = DataFrame({"cats":cats,"values":values}, index=idx)

    # positional slice keeps the categorical column intact
    result = df.iloc[2:4,:]
    expected = DataFrame({"cats":Categorical(['b','b'],categories=['a','b','c']),"values":[2,2]}, index=['j','k'])
    tm.assert_frame_equal(result, expected)

    # the sliced frame reports 'category' for the cats column
    result = df.iloc[2:4,:].dtypes
    expected = Series(['category','int64'],['cats','values'])
    tm.assert_series_equal(result, expected)

    # label slice of a single categorical column returns a categorical Series
    result = df.loc["h":"j","cats"]
    expected = Series(Categorical(['a','b','b'],
                                  categories=['a','b','c']), index=['h','i','j'], name='cats')
    tm.assert_series_equal(result, expected)

    # mixed label/positional .ix slice (deprecated in later pandas versions)
    result = df.ix["h":"j",0:1]
    expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j']) })
    tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
    """Exhaustive write-side indexing on frames/Series with categorical data.

    For each indexer (iloc/loc/ix/iat/at/fancy/set_value) assign values that
    are inside the current categories (must succeed, producing the matching
    ``exp_*`` frame) and values outside the categories (must raise
    ValueError). A trailing Series section checks scalar/mask assignment and
    setting np.nan.
    """
    # systematically test the assigning operations:
    # for all slicing ops:
    # for value in categories and value not in categories:
    # - assign a single value -> exp_single_cats_value
    # - assign a complete row (mixed values) -> exp_single_row
    # - assign multiple rows (mixed values) (-> array) -> exp_multi_row
    # - assign a part of a column with dtype == categorical -> exp_parts_cats_col
    # - assign a part of a column with dtype != categorical -> exp_parts_cats_col
    cats = pd.Categorical(["a","a","a","a","a","a","a"], categories=["a","b"])
    idx = pd.Index(["h","i","j","k","l","m","n"])
    values = [1,1,1,1,1,1,1]
    orig = pd.DataFrame({"cats":cats,"values":values}, index=idx)

    ### the expected values
    # changed single row
    cats1 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
    idx1 = pd.Index(["h","i","j","k","l","m","n"])
    values1 = [1,1,2,1,1,1,1]
    exp_single_row = pd.DataFrame({"cats":cats1,"values":values1}, index=idx1)

    #changed multiple rows
    cats2 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
    idx2 = pd.Index(["h","i","j","k","l","m","n"])
    values2 = [1,1,2,2,1,1,1]
    exp_multi_row = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)

    # changed part of the cats column
    cats3 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
    idx3 = pd.Index(["h","i","j","k","l","m","n"])
    values3 = [1,1,1,1,1,1,1]
    exp_parts_cats_col = pd.DataFrame({"cats":cats3,"values":values3}, index=idx3)

    # changed single value in cats col
    cats4 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
    idx4 = pd.Index(["h","i","j","k","l","m","n"])
    values4 = [1,1,1,1,1,1,1]
    exp_single_cats_value = pd.DataFrame({"cats":cats4,"values":values4}, index=idx4)

    #### iloc #####
    ################
    # - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.iloc[2,0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.iloc[df.index == "j",0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    # - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.iloc[2,0] = "c"
    self.assertRaises(ValueError, f)

    # - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.iloc[2,:] = ["b",2]
    tm.assert_frame_equal(df, exp_single_row)

    # - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.iloc[2,:] = ["c",2]
    self.assertRaises(ValueError, f)

    # - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.iloc[2:4,:] = [["b",2],["b",2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.iloc[2:4,:] = [["c",2],["c",2]]
    self.assertRaises(ValueError, f)

    # - assign a part of a column with dtype == categorical -> exp_parts_cats_col
    df = orig.copy()
    df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b","c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.iloc[2:4,0] = pd.Categorical(["c","c"], categories=["a","b","c"])

    # - assign a part of a column with dtype != categorical -> exp_parts_cats_col
    df = orig.copy()
    df.iloc[2:4,0] = ["b","b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.iloc[2:4,0] = ["c","c"]

    #### loc #####
    ################
    # - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.loc["j","cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.loc[df.index == "j","cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    # - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.loc["j","cats"] = "c"
    self.assertRaises(ValueError, f)

    # - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.loc["j",:] = ["b",2]
    tm.assert_frame_equal(df, exp_single_row)

    # - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.loc["j",:] = ["c",2]
    self.assertRaises(ValueError, f)

    # - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.loc["j":"k",:] = [["b",2],["b",2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.loc["j":"k",:] = [["c",2],["c",2]]
    self.assertRaises(ValueError, f)

    # - assign a part of a column with dtype == categorical -> exp_parts_cats_col
    df = orig.copy()
    df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b","c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.loc["j":"k","cats"] = pd.Categorical(["c","c"], categories=["a","b","c"])

    # - assign a part of a column with dtype != categorical -> exp_parts_cats_col
    df = orig.copy()
    df.loc["j":"k","cats"] = ["b","b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.loc["j":"k","cats"] = ["c","c"]

    #### ix #####
    ################
    # - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.ix["j",0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    df = orig.copy()
    df.ix[df.index == "j",0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    # - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.ix["j",0] = "c"
    self.assertRaises(ValueError, f)

    # - assign a complete row (mixed values) -> exp_single_row
    df = orig.copy()
    df.ix["j",:] = ["b",2]
    tm.assert_frame_equal(df, exp_single_row)

    # - assign a complete row (mixed values) not in categories set
    def f():
        df = orig.copy()
        df.ix["j",:] = ["c",2]
    self.assertRaises(ValueError, f)

    # - assign multiple rows (mixed values) -> exp_multi_row
    df = orig.copy()
    df.ix["j":"k",:] = [["b",2],["b",2]]
    tm.assert_frame_equal(df, exp_multi_row)

    def f():
        df = orig.copy()
        df.ix["j":"k",:] = [["c",2],["c",2]]
    self.assertRaises(ValueError, f)

    # - assign a part of a column with dtype == categorical -> exp_parts_cats_col
    df = orig.copy()
    df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b"])
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        # different categories -> not sure if this should fail or pass
        df = orig.copy()
        df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b","c"])

    with tm.assertRaises(ValueError):
        # different values
        df = orig.copy()
        df.ix["j":"k",0] = pd.Categorical(["c","c"], categories=["a","b","c"])

    # - assign a part of a column with dtype != categorical -> exp_parts_cats_col
    df = orig.copy()
    df.ix["j":"k",0] = ["b","b"]
    tm.assert_frame_equal(df, exp_parts_cats_col)

    with tm.assertRaises(ValueError):
        df.ix["j":"k",0] = ["c","c"]

    # iat
    df = orig.copy()
    df.iat[2,0] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    # - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.iat[2,0] = "c"
    self.assertRaises(ValueError, f)

    # at
    # - assign a single value -> exp_single_cats_value
    df = orig.copy()
    df.at["j","cats"] = "b"
    tm.assert_frame_equal(df, exp_single_cats_value)

    # - assign a single value not in the current categories set
    def f():
        df = orig.copy()
        df.at["j","cats"] = "c"
    self.assertRaises(ValueError, f)

    # fancy indexing
    catsf = pd.Categorical(["a","a","c","c","a","a","a"], categories=["a","b","c"])
    idxf = pd.Index(["h","i","j","k","l","m","n"])
    valuesf = [1,1,3,3,1,1,1]
    df = pd.DataFrame({"cats":catsf,"values":valuesf}, index=idxf)

    exp_fancy = exp_multi_row.copy()
    exp_fancy["cats"].cat.set_categories(["a","b","c"], inplace=True)

    df[df["cats"] == "c"] = ["b",2]
    tm.assert_frame_equal(df, exp_multi_row)

    # set_value
    df = orig.copy()
    df.set_value("j","cats", "b")
    tm.assert_frame_equal(df, exp_single_cats_value)

    def f():
        df = orig.copy()
        df.set_value("j","cats", "c")
    self.assertRaises(ValueError, f)

    # Assigning a Category to parts of a int/... column uses the values of the Categorical
    df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]})
    exp = pd.DataFrame({"a":[1,"b","b",1,1], "b":["a","a","b","b","a"]})
    df.loc[1:2,"a"] = pd.Categorical(["b","b"], categories=["a","b"])
    df.loc[2:3,"b"] = pd.Categorical(["b","b"], categories=["a","b"])
    tm.assert_frame_equal(df, exp)

    ######### Series ##########
    orig = Series(pd.Categorical(["b","b"], categories=["a","b"]))
    s = orig.copy()
    s[:] = "a"
    exp = Series(pd.Categorical(["a","a"], categories=["a","b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[1] = "a"
    exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[s.index > 0] = "a"
    exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s[[False, True]] = "a"
    exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
    tm.assert_series_equal(s, exp)

    s = orig.copy()
    s.index = ["x", "y"]
    s["y"] = "a"
    exp = Series(pd.Categorical(["b","a"], categories=["a","b"]), index=["x", "y"])
    tm.assert_series_equal(s, exp)

    # ensure that one can set something to np.nan
    s = Series(Categorical([1,2,3]))
    exp = Series(Categorical([1,np.nan,3]))
    s[1] = np.nan
    tm.assert_series_equal(s, exp)
def test_comparisons(self):
    """Comparison semantics for ordered categorical Series.

    Ordering comparisons must honour the categories order, only work
    between categoricals with identical categories, raise TypeError
    against plain Series/ndarrays, and raise for unordered categoricals
    and for scalars outside the categories (except ==/!=).
    """
    tests_data = [(list("abc"), list("cba"), list("bbb")),
                  ([1,2,3], [3,2,1], [2,2,2])]
    for data , reverse, base in tests_data:
        # cat_rev uses reversed categories, so its ordering is inverted
        cat_rev = pd.Series(pd.Categorical(data, categories=reverse, ordered=True))
        cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse, ordered=True))
        cat = pd.Series(pd.Categorical(data, ordered=True))
        cat_base = pd.Series(pd.Categorical(base, categories=cat.cat.categories, ordered=True))
        s = Series(base)
        a = np.array(base)

        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)

        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)

        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)

        # scalar comparisons follow the same category ordering
        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)

        # Only categories with same categories can be compared
        def f():
            cat > cat_rev
        self.assertRaises(TypeError, f)

        # categorical cannot be compared to Series or numpy array, and also not the other way
        # around
        self.assertRaises(TypeError, lambda: cat > s)
        self.assertRaises(TypeError, lambda: cat_rev > s)
        self.assertRaises(TypeError, lambda: cat > a)
        self.assertRaises(TypeError, lambda: cat_rev > a)
        self.assertRaises(TypeError, lambda: s < cat)
        self.assertRaises(TypeError, lambda: s < cat_rev)
        self.assertRaises(TypeError, lambda: a < cat)
        self.assertRaises(TypeError, lambda: a < cat_rev)

    # unequal comparison should raise for unordered cats
    cat = Series(Categorical(list("abc")))
    def f():
        cat > "b"
    self.assertRaises(TypeError, f)
    cat = Series(Categorical(list("abc"), ordered=False))
    def f():
        cat > "b"
    self.assertRaises(TypeError, f)

    # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
    # comparisons with scalars not in categories should raise for unequal comps, but not for
    # equal/not equal
    cat = Series(Categorical(list("abc"), ordered=True))
    self.assertRaises(TypeError, lambda: cat < "d")
    self.assertRaises(TypeError, lambda: cat > "d")
    self.assertRaises(TypeError, lambda: "d" < cat)
    self.assertRaises(TypeError, lambda: "d" > cat)
    self.assert_series_equal(cat == "d" , Series([False, False, False]))
    self.assert_series_equal(cat != "d" , Series([True, True, True]))

    # And test NaN handling... (NaN never compares equal, even to itself)
    cat = Series(Categorical(["a","b","c", np.nan]))
    exp = Series([True, True, True, False])
    res = (cat == cat)
    tm.assert_series_equal(res, exp)
def test_cat_equality(self):
    """GH 8938: ==/!= work between categorical and object Series;
    ordering comparisons (<, >) against object Series must raise TypeError.
    """
    # GH 8938
    # allow equality comparisons
    a = Series(list('abc'),dtype="category")
    b = Series(list('abc'),dtype="object")
    c = Series(['a','b','cc'],dtype="object")
    d = Series(list('acb'),dtype="object")
    e = Categorical(list('abc'))
    f = Categorical(list('acb'))

    # vs scalar
    self.assertFalse((a=='a').all())
    self.assertTrue(((a!='a') == ~(a=='a')).all())
    self.assertFalse(('a'==a).all())
    self.assertTrue((a=='a')[0])
    self.assertTrue(('a'==a)[0])
    self.assertFalse(('a'!=a)[0])

    # vs list-like
    self.assertTrue((a==a).all())
    self.assertFalse((a!=a).all())
    self.assertTrue((a==list(a)).all())
    self.assertTrue((a==b).all())
    self.assertTrue((b==a).all())
    self.assertTrue(((~(a==b))==(a!=b)).all())
    self.assertTrue(((~(b==a))==(b!=a)).all())
    self.assertFalse((a==c).all())
    self.assertFalse((c==a).all())
    self.assertFalse((a==d).all())
    self.assertFalse((d==a).all())

    # vs a cat-like
    self.assertTrue((a==e).all())
    self.assertTrue((e==a).all())
    self.assertFalse((a==f).all())
    self.assertFalse((f==a).all())
    self.assertTrue(((~(a==e)==(a!=e)).all()))
    self.assertTrue(((~(e==a)==(e!=a)).all()))
    self.assertTrue(((~(a==f)==(a!=f)).all()))
    self.assertTrue(((~(f==a)==(f!=a)).all()))

    # non-equality is not comparable
    self.assertRaises(TypeError, lambda: a < b)
    self.assertRaises(TypeError, lambda: b < a)
    self.assertRaises(TypeError, lambda: a > b)
    self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
    """Concatenating frames with categorical columns.

    - concat of frames with identical categories preserves the dtype
    - concat raises ValueError when the categories differ
    - GH 7864: a custom category ordering survives slicing + concat
    """
    cat = pd.Categorical(["a","b"], categories=["a","b"])
    vals = [1,2]
    df = pd.DataFrame({"cats":cat, "vals":vals})
    cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"])
    vals2 = [1,2,1,2]
    exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1]))

    res = pd.concat([df,df])
    tm.assert_frame_equal(exp, res)

    # Concat should raise if the two categoricals do not have the same categories
    cat3 = pd.Categorical(["a","b"], categories=["a","b","c"])
    vals3 = [1,2]
    df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3})

    def f():
        pd.concat([df,df_wrong_categories])
    self.assertRaises(ValueError, f)

    # GH 7864
    # make sure ordering is preserved
    df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
    df["grade"] = pd.Categorical(df["raw_grade"])
    # BUG FIX: the original called .cat.set_categories() without assigning the
    # result back (a no-op, since inplace defaults to False); assign so the
    # custom category order is actually applied before slicing/concatenating
    df['grade'] = df['grade'].cat.set_categories(['e', 'a', 'b'])
    df1 = df[0:3]
    df2 = df[3:]

    # slices share the parent's categories in the parent's order
    self.assert_numpy_array_equal(df['grade'].cat.categories, df1['grade'].cat.categories)
    self.assert_numpy_array_equal(df['grade'].cat.categories, df2['grade'].cat.categories)

    # and concat of the slices keeps that order too
    dfx = pd.concat([df1, df2])
    self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories)
def test_concat_preserve(self):
    """GH 8641: concat must keep the category dtype of Series/frame inputs."""
    s = Series(list('abc'), dtype='category')
    s2 = Series(list('abd'), dtype='category')

    # mismatched categories cannot be concatenated
    self.assertRaises(ValueError, lambda: pd.concat([s, s2]))

    # identical categories concatenate cleanly, with or without a fresh index
    result = pd.concat([s, s], ignore_index=True)
    tm.assert_series_equal(result, Series(list('abcabc')).astype('category'))

    result = pd.concat([s, s])
    expected = Series(list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
    tm.assert_series_equal(result, expected)

    # a frame with a custom category order keeps it through concat
    ints = Series(np.arange(6, dtype='int64'))
    strs = Series(list('aabbca'))
    frame = DataFrame({'A': ints, 'B': strs.astype('category', categories=list('cab'))})
    result = pd.concat([frame, frame])
    expected = DataFrame({'A': pd.concat([ints, ints]),
                          'B': pd.concat([strs, strs]).astype('category', categories=list('cab'))})
    tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
    """Concat keeps a categorical index; mismatched categories raise TypeError."""
    values = Series(np.arange(6, dtype='int64'))
    labels = Series(list('aabbca'))

    frame = DataFrame({'A': values,
                       'B': labels.astype('category', categories=list('cab'))}).set_index('B')
    result = pd.concat([frame, frame])
    expected = DataFrame({'A': pd.concat([values, values]),
                          'B': pd.concat([labels, labels]).astype('category', categories=list('cab'))}).set_index('B')
    tm.assert_frame_equal(result, expected)

    # wrong categories on the index -> concat must refuse
    other = DataFrame({'A': values,
                       'B': labels.astype('category', categories=list('abc'))}).set_index('B')
    self.assertRaises(TypeError, lambda: pd.concat([frame, other]))
def test_append(self):
    """DataFrame.append with categorical columns mirrors concat semantics."""
    cats = pd.Categorical(["a","b"], categories=["a","b"])
    df = pd.DataFrame({"cats": cats, "vals": [1, 2]})

    doubled = pd.Categorical(["a","b","a","b"], categories=["a","b"])
    expected = pd.DataFrame({"cats": doubled, "vals": [1, 2, 1, 2]},
                            index=pd.Index([0, 1, 0, 1]))
    tm.assert_frame_equal(expected, df.append(df))

    # appending a frame whose categorical has different categories must raise
    other_cats = pd.Categorical(["a","b"], categories=["a","b","c"])
    df_wrong_categories = pd.DataFrame({"cats": other_cats, "vals": [1, 2]})
    self.assertRaises(ValueError, lambda: df.append(df_wrong_categories))
def test_merge(self):
    """GH 9426: merging on categorical key columns matches the object-dtype result."""
    right = DataFrame({'c': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'},
                       'd': {0: 'null', 1: 'null', 2: 'null', 3: 'null', 4: 'null'}})
    left = DataFrame({'a': {0: 'f', 1: 'f', 2: 'f', 3: 'f', 4: 'f'},
                      'b': {0: 'g', 1: 'g', 2: 'g', 3: 'g', 4: 'g'}})

    # baseline: plain object-object merge
    expected = pd.merge(left, right, how='left', left_on='b', right_on='c').copy()

    # object key vs categorical payload column
    cright = right.copy()
    cright['d'] = cright['d'].astype('category')
    result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)

    # categorical key vs object
    cleft = left.copy()
    cleft['b'] = cleft['b'].astype('category')
    result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)

    # categorical on both sides
    cright = right.copy()
    cright['d'] = cright['d'].astype('category')
    cleft = left.copy()
    cleft['b'] = cleft['b'].astype('category')
    result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
    tm.assert_frame_equal(result, expected)
def test_repeat(self):
    """GH 10183: Categorical.repeat tiles each element, keeping the categories."""
    cat = pd.Categorical(["a","b"], categories=["a","b"])
    expected = pd.Categorical(["a", "a", "b", "b"], categories=["a","b"])
    self.assert_categorical_equal(cat.repeat(2), expected)
def test_na_actions(self):
    """fillna/dropna on frames with a categorical column.

    fillna with an out-of-category value must raise ValueError; dropna
    honours NaN in both the categorical and the object column; fillna must
    also handle NaN-as-category after a (deprecated) rename to NaN.
    """
    cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
    vals = ["a","b",np.nan,"d"]
    df = pd.DataFrame({"cats":cat, "vals":vals})
    cat2 = pd.Categorical([1,2,3,3], categories=[1,2,3])
    vals2 = ["a","b","b","d"]
    df_exp_fill = pd.DataFrame({"cats":cat2, "vals":vals2})
    cat3 = pd.Categorical([1,2,3], categories=[1,2,3])
    vals3 = ["a","b",np.nan]
    df_exp_drop_cats = pd.DataFrame({"cats":cat3, "vals":vals3})
    cat4 = pd.Categorical([1,2], categories=[1,2,3])
    vals4 = ["a","b"]
    df_exp_drop_all = pd.DataFrame({"cats":cat4, "vals":vals4})

    # fillna with in-category values
    res = df.fillna(value={"cats":3, "vals":"b"})
    tm.assert_frame_equal(res, df_exp_fill)

    # filling with a value outside the categories must raise
    def f():
        df.fillna(value={"cats":4, "vals":"c"})
    self.assertRaises(ValueError, f)

    # forward-fill produces the same frame here
    res = df.fillna(method='pad')
    tm.assert_frame_equal(res, df_exp_fill)

    res = df.dropna(subset=["cats"])
    tm.assert_frame_equal(res, df_exp_drop_cats)

    res = df.dropna()
    tm.assert_frame_equal(res, df_exp_drop_all)

    # make sure that fillna takes both missing values and NA categories into account
    c = Categorical(["a","b",np.nan])
    with tm.assert_produces_warning(FutureWarning):
        # renaming a category to NaN is deprecated, hence the warning check
        c.set_categories(["a","b",np.nan], rename=True, inplace=True)
    c[0] = np.nan
    df = pd.DataFrame({"cats":c, "vals":[1,2,3]})
    df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]})
    res = df.fillna("a")
    tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
    """astype conversions from a categorical Series.

    Category -> category is a no-op, -> str/int materialises the values,
    -> float64 raises, and only real dtypes (not the Categorical class
    itself) are accepted.
    """
    # NOTE(review): self.cat is a fixture built in setUp (not visible here)
    s = self.cat['value_group']
    expected = s
    tm.assert_series_equal(s.astype('category'),expected)
    tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected)
    self.assertRaises(ValueError, lambda : s.astype('float64'))

    cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
    exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
    tm.assert_series_equal(cat.astype('str'), exp)
    s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
    exp2 = Series([1,2,3,4]).astype(int)
    tm.assert_series_equal(s2.astype('int') , exp2)

    # object don't sort correctly, so just compare that we have the same values
    def cmp(a,b):
        tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b)))
    expected = Series(np.array(s.values),name='value_group')
    cmp(s.astype('object'),expected)
    cmp(s.astype(np.object_),expected)

    # array conversion
    tm.assert_almost_equal(np.array(s),np.array(s.values))

    # valid conversion
    for valid in [lambda x: x.astype('category'),
                  lambda x: x.astype(com.CategoricalDtype()),
                  lambda x: x.astype('object').astype('category'),
                  lambda x: x.astype('object').astype(com.CategoricalDtype())]:
        result = valid(s)
        tm.assert_series_equal(result,s)

    # invalid conversion (these are NOT a dtype)
    for invalid in [lambda x: x.astype(pd.Categorical),
                    lambda x: x.astype('object').astype(pd.Categorical)]:
        self.assertRaises(TypeError, lambda : invalid(s))
def test_astype_categorical(self):
    """astype round-trips for a raw Categorical; casting to float is rejected."""
    cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
    # identity conversion keeps the categorical intact
    tm.assert_categorical_equal(cat, cat.astype('category'))
    # object conversion materialises the same values
    tm.assert_almost_equal(np.array(cat), cat.astype('object'))
    # no numeric casts from a categorical
    self.assertRaises(ValueError, lambda: cat.astype(float))
def test_to_records(self):
    """GH 8626: category dtype survives frame construction; to_records coerces it."""
    # dict-of-list construction keeps the category dtype
    frame = DataFrame({'A': list('abc')}, dtype='category')
    tm.assert_series_equal(frame['A'],
                           Series(list('abc'), dtype='category', name='A'))

    # list-like construction does too
    frame = DataFrame(list('abc'), dtype='category')
    tm.assert_series_equal(frame[0],
                           Series(list('abc'), dtype='category', name=0))

    # to_records coerces the categorical column back to objects
    result = frame.to_records()
    expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
                            dtype=[('index', '=i8'), ('0', 'O')])
    tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
    """Arithmetic, reductions and numeric ufuncs on categoricals raise TypeError."""
    # NOTE(review): self.cat is a fixture built in setUp (not visible here)
    # numeric ops should not succeed
    for op in ['__add__','__sub__','__mul__','__truediv__']:
        self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat))

    # reduction ops should not succeed (unless specifically defined, e.g. min/max)
    s = self.cat['value_group']
    for op in ['kurt','skew','var','std','mean','sum','median']:
        self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False))

    # mad technically works because it takes always the numeric data

    # numpy ops
    s = pd.Series(pd.Categorical([1,2,3,4]))
    self.assertRaises(TypeError, lambda : np.sum(s))

    # numeric ops on a Series
    for op in ['__add__','__sub__','__mul__','__truediv__']:
        self.assertRaises(TypeError, lambda : getattr(s,op)(2))

    # invalid ufunc
    self.assertRaises(TypeError, lambda : np.log(s))
def test_cat_tab_completition(self):
# test the tab completion display
ok_for_cat = ['categories','codes','ordered','set_categories',
'add_categories', 'remove_categories', 'rename_categories',
'reorder_categories', 'remove_unused_categories',
'as_ordered', 'as_unordered']
def get_dir(s):
results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError, "You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
self.assertIsInstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
('match', ("a"), {}), # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0,1), {}),
('slice_replace', (0,1,"z"), {}),
('split', (" ",), {"expand":False}), #default
('split', (" ",), {"expand":True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
# * get, join: they need a individual elements of type lists, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f for f in dir(s.str) if not (f.startswith("_") or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1,2,3]).astype('category')
with tm.assertRaisesRegexp(AttributeError, "Can only use .str accessor with string"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days','10 days'))
c_tdr = s_tdr.astype("category")
test_data = [
("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
#('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize']
for name, attr_names, s, c in test_data:
func_names = [f for f in dir(s.dt) if not (f.startswith("_") or
f in attr_names or
f in _special_func_names or
f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
invalid = Series([1,2,3]).astype('category')
with tm.assertRaisesRegexp(AttributeError, "Can only use .dt accessor with datetimelike"):
invalid.dt
self.assertFalse(hasattr(invalid, 'str'))
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_concat_categorical(self):
# See GH 10177
df1 = pd.DataFrame(np.arange(18, dtype='int64').reshape(6, 3), columns=["a", "b", "c"])
df2 = pd.DataFrame(np.arange(14, dtype='int64').reshape(7, 2), columns=["a", "c"])
df2['h'] = pd.Series(pd.Categorical(["one", "one", "two", "one", "two", "two", "one"]))
df_concat = pd.concat((df1, df2), axis=0).reset_index(drop=True)
df_expected = pd.DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13]})
df_expected['h'] = pd.Series(pd.Categorical([None, None, None, None, None, None,
"one", "one", "two", "one", "two", "two", "one"]))
tm.assert_frame_equal(df_expected, df_concat)
if __name__ == '__main__':
    # Run this module's tests directly under nose, dropping into pdb on
    # failures.  (Optionally: '--with-coverage', '--cover-package=pandas.core')
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| artistic-2.0 |
aroche/django | django/db/backends/sqlite3/introspection.py | 204 | 11332 | import re
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
# Matches "char(n)" / "varchar(n)" type names; group 1 captures the declared
# column size.
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')

# Extend Django's base FieldInfo tuple with the column default reported by
# SQLite's PRAGMA table_info.
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
def get_field_size(name):
    """ Extract the size number from a "varchar(11)" type name """
    match = field_size_re.search(name)
    if match:
        return int(match.group(1))
    # Not a (var)char type with an explicit size.
    return None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """
    Light wrapper that "fakes" a dictionary interface mapping SQLite column
    types to Django field types.  A plain dict is not enough because some
    SQLite data types include variables in them -- e.g. "varchar(30)" --
    and can't be matched as a simple dictionary lookup.
    """
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        # Lookups are case-insensitive; SQLite echoes whatever case the
        # table definition used.
        key = key.lower()
        try:
            return self.base_data_types_reverse[key]
        except KeyError:
            size = get_field_size(key)
            if size is not None:
                return ('CharField', {'max_length': size})
            # BUG FIX: previously raised a bare `raise KeyError`, which loses
            # the offending type name; include it so callers can see which
            # SQL type wasn't recognized.
            raise KeyError(key)
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """
    SQLite schema introspection.  Because SQLite enforces very little and
    simply stores the original CREATE TABLE SQL in sqlite_master, several
    methods below recover structure by parsing that SQL text with regexes.
    """
    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0] is the first character of 'table'/'view' ('t' or 'v').
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # The three Nones fill the display_size / precision / scale slots of
        # the DB-API description tuple, which SQLite does not report.
        return [
            FieldInfo(
                info['name'],
                info['type'],
                None,
                info['size'],
                None,
                None,
                info['null_ok'],
                info['default'],
            ) for info in self._table_info(cursor, table_name)
        ]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match('FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                # Inline REFERENCES: the column name is the first token.
                field_name = field_desc.split()[0].strip('"')

            # Resolve the referenced column in the target table's own SQL.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        """
        Returns single-column indexes for the table as
        {column_name: {'primary_key': bool, 'unique': bool}}.
        """
        indexes = {}
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Return PRAGMA table_info rows as a list of dicts."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, default_value, pk
        return [{
            'name': field[1],
            'type': field[2],
            'size': get_field_size(field[2]),
            'null_ok': not field[3],
            'default': field[4],
            'pk': field[5],  # undocumented
        } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
| bsd-3-clause |
cogmission/nupic | setup.py | 2 | 6053 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Installation script for Python nupic package."""
import os
import pkg_resources
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as BaseTestCommand
REPO_DIR = os.path.dirname(os.path.realpath(__file__))
def getVersion():
    """
    Get version from local file.
    """
    versionPath = os.path.join(REPO_DIR, "VERSION")
    with open(versionPath, "r") as versionFile:
        return versionFile.read().strip()
def nupicBindingsPrereleaseInstalled():
    """
    Make an attempt to determine if a pre-release version of nupic.bindings is
    installed already.

    @return: boolean
    """
    prereleaseInstalled = False
    try:
        distribution = pkg_resources.get_distribution("nupic.bindings")
        version = pkg_resources.parse_version(distribution.version)
        # A pre-release dev version of nupic.bindings is installed.
        prereleaseInstalled = bool(version.is_prerelease)
    except pkg_resources.DistributionNotFound:
        # Silently ignore. The absence of nupic.bindings will be handled by
        # setuptools by default
        pass
    return prereleaseInstalled
def parse_file(requirementFile):
    """
    Read a requirements-style file and return its stripped lines, skipping
    comment lines (those starting with '#').

    :param requirementFile: path to the file to read
    :return: list of stripped lines; empty list if the file can't be opened
    """
    try:
        # BUG FIX: the file handle was previously opened without ever being
        # closed; use a context manager so it is released deterministically.
        with open(requirementFile) as f:
            return [
                line.strip()
                for line in f
                if not line.startswith("#")
            ]
    except IOError:
        return []
class TestCommand(BaseTestCommand):
    """`python setup.py test` command that delegates to py.test."""

    # setuptools command-line option spec: (long name, short name, help).
    user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]

    def initialize_options(self):
        # setuptools lifecycle hook: set option defaults before parsing.
        BaseTestCommand.initialize_options(self)
        # Default to running only the "unit" test suite.
        self.pytest_args = ["unit"]  # pylint: disable=W0201

    def finalize_options(self):
        # setuptools lifecycle hook: validate/normalize options after parsing.
        BaseTestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here so pytest is only required when tests are actually run.
        import pytest
        cwd = os.getcwd()
        try:
            # Tests expect to run from the repo's tests/ directory.
            os.chdir("tests")
            errno = pytest.main(self.pytest_args)
        finally:
            os.chdir(cwd)
        # Propagate pytest's exit status to the shell.
        sys.exit(errno)
def findRequirements():
    """
    Read the requirements.txt file and parse into requirements for setup's
    install_requirements option.
    """
    reqs = parse_file(os.path.join(REPO_DIR, "requirements.txt"))
    if not nupicBindingsPrereleaseInstalled():
        return reqs
    # User has a pre-release version of nupic.bindings installed, which is
    # only possible if the user installed and built nupic.bindings from
    # source and it is up to the user to decide when to update it.  Quietly
    # drop the requirements.txt entry so as to not conflate the two.
    return [req for req in reqs if "nupic.bindings" not in req]
if __name__ == "__main__":
    # Resolve install requirements (may drop nupic.bindings if a source-built
    # pre-release is already installed; see findRequirements).
    requirements = findRequirements()

    setup(
        name="nupic",
        version=getVersion(),
        install_requires=requirements,
        # Python packages live under src/.
        package_dir={"": "src"},
        packages=find_packages("src"),
        namespace_packages=["nupic"],
        # Non-Python data files shipped inside the package.
        package_data={
            "nupic.support": ["nupic-default.xml",
                              "nupic-logging.conf"],
            "nupic": ["README.md", "LICENSE.txt"],
            "nupic.data": ["*.json"],
            "nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"],
            "nupic.frameworks.opf.jsonschema": ["*.json"],
            "nupic.swarming.exp_generator": ["*.json", "*.tpl"],
            "nupic.swarming.jsonschema": ["*.json"],
            "nupic.datafiles": ["*.csv", "*.txt"],
        },
        # `python setup.py test` runs the py.test-based TestCommand above.
        cmdclass={"test": TestCommand},
        include_package_data=True,
        zip_safe=False,
        extras_require={
            # Default requirement based on system type
            ":platform_system=='Linux' or platform_system=='Darwin'":
                ["pycapnp==0.5.8"],
            # Superseded by platform_system-conditional requirement, but keeping
            # empty extra for compatibility as recommended by setuptools doc.
            "capnp": [],
        },
        description="Numenta Platform for Intelligent Computing",
        author="Numenta",
        author_email="help@numenta.org",
        url="https://github.com/numenta/nupic",
        classifiers=[
            "Programming Language :: Python",
            "Programming Language :: Python :: 2",
            "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
            "Operating System :: MacOS :: MacOS X",
            "Operating System :: POSIX :: Linux",
            "Operating System :: Microsoft :: Windows",
            # It has to be "5 - Production/Stable" or else pypi rejects it!
            "Development Status :: 5 - Production/Stable",
            "Environment :: Console",
            "Intended Audience :: Science/Research",
            "Topic :: Scientific/Engineering :: Artificial Intelligence"
        ],
        long_description=(
            "Numenta Platform for Intelligent Computing: a machine intelligence "
            "platform that implements the HTM learning algorithms. HTM is a "
            "detailed computational theory of the neocortex. At the core of HTM "
            "are time-based continuous learning algorithms that store and recall "
            "spatial and temporal patterns. NuPIC is suited to a variety of "
            "problems, particularly anomaly detection and prediction of streaming "
            "data sources.\n\n"
            "For more information, see http://numenta.org or the NuPIC wiki at "
            "https://github.com/numenta/nupic/wiki.")
    )
| agpl-3.0 |
mattjj/pyhsmm-slds | pyslds/models.py | 2 | 20741 | from __future__ import division
import numpy as np
from functools import partial
from builtins import zip
from pybasicbayes.distributions import DiagonalRegression, Gaussian, Regression
import pyhsmm
from pyhsmm.util.general import list_split
from pyslds.states import HMMSLDSStatesPython, HMMSLDSStatesEigen, HSMMSLDSStatesPython, HSMMSLDSStatesEigen
from pyslds.states import HMMCountSLDSStatesPython, HMMCountSLDSStatesEigen, HSMMCountSLDSStatesPython, \
HSMMCountSLDSStatesEigen
from pyslds.util import gaussian_map_estimation, regression_map_estimation, gaussian_logprior, regression_logprior
class _SLDSMixin(object):
    """
    Common plumbing for switching linear dynamical system (SLDS) models:
    holds per-discrete-state dynamics, emission, and initial-condition
    distributions, and hands the dynamics distributions to the pyhsmm base
    class as its per-state observation distributions.
    """

    def __init__(self, dynamics_distns, emission_distns, init_dynamics_distns,
                 **kwargs):
        self.init_dynamics_distns = init_dynamics_distns
        self.dynamics_distns = dynamics_distns

        # Allow for a single, shared emission distribution
        if not isinstance(emission_distns, list):
            self._single_emission = True
            self._emission_distn = emission_distns
            # Replicate the shared distribution so per-state code paths work
            # uniformly (all entries alias the same object).
            self.emission_distns = [emission_distns] * len(self.dynamics_distns)
        else:
            assert isinstance(emission_distns, list) and \
                len(emission_distns) == len(dynamics_distns)
            self._single_emission = False
            self.emission_distns = emission_distns

        super(_SLDSMixin, self).__init__(
            obs_distns=self.dynamics_distns, **kwargs)

    def generate(self, T=100, keep=True, with_noise=True,
                 initial_condition=None, stateseq=None, **kwargs):
        """
        Sample a length-T trajectory from the generative model.

        Returns (data, gaussian_states, stateseq); the states object is
        appended to self.states_list when `keep` is True.
        """
        s = self._states_class(model=self, T=T, initialize_from_prior=True,
                               **kwargs)
        s.generate_states(with_noise=with_noise,
                          initial_condition=initial_condition,
                          stateseq=stateseq)
        data = self._generate_obs(s)
        if keep:
            self.states_list.append(s)
        return data + (s.stateseq,)

    def _generate_obs(self, s):
        # Sample observations for a freshly generated states object.
        if s.data is None:
            s.data = s.generate_obs()
        else:
            # TODO: Handle missing data
            raise NotImplementedError
        return s.data, s.gaussian_states

    def smooth(self, data, inputs=None, mask=None):
        """Return the smoothed observations for `data` without keeping the
        temporary states object on the model."""
        self.add_data(data, inputs=inputs, mask=mask)
        s = self.states_list.pop()
        return s.smooth()

    @property
    def diagonal_noise(self):
        # True iff every emission distribution models diagonal observation
        # noise (enables cheaper message passing in the states objects).
        return all([isinstance(ed, DiagonalRegression) for ed in self.emission_distns])

    @property
    def has_missing_data(self):
        # True if any attached sequence carries an observation mask.
        return any([s.mask is not None for s in self.states_list])

    def heldout_log_likelihood(self, test_masks=None):
        """Sum of held-out log likelihoods across attached sequences; one
        optional test mask per states object."""
        test_masks = [None] * len(self.states_list) if test_masks is None else test_masks
        assert len(test_masks) == len(self.states_list)
        hll = 0
        for mask, states in zip(test_masks, self.states_list):
            hll += states.heldout_log_likelihood(test_mask=mask)
        return hll
class _SLDSGibbsMixin(_SLDSMixin):
    """Gibbs-sampling updates for SLDS parameters (the state resampling is
    inherited from the pyhsmm model / handled by the states classes)."""

    def resample_parameters(self):
        self.resample_lds_parameters()
        self.resample_hmm_parameters()

    def resample_lds_parameters(self):
        # Continuous-state model parameters.
        self.resample_init_dynamics_distns()
        self.resample_dynamics_distns()
        self.resample_emission_distns()

    def resample_hmm_parameters(self):
        # Discrete-chain parameters (transitions, init state distn) via pyhsmm.
        super(_SLDSGibbsMixin, self).resample_parameters()

    def resample_init_dynamics_distns(self):
        # Each init distribution sees the first continuous state of every
        # sequence that starts in its discrete state.
        for state, d in enumerate(self.init_dynamics_distns):
            d.resample(
                [s.gaussian_states[0] for s in self.states_list
                 if s.stateseq[0] == state])
        self._clear_caches()

    def resample_dynamics_distns(self):
        # Regression targets: x_{t+1} given [x_t, u_t], grouped by the
        # discrete state z_t at the *source* end of the transition.
        zs = [s.stateseq[:-1] for s in self.states_list]
        xs = [np.hstack((s.gaussian_states[:-1], s.inputs[:-1]))
              for s in self.states_list]
        ys = [s.gaussian_states[1:] for s in self.states_list]

        for state, d in enumerate(self.dynamics_distns):
            d.resample(
                [(x[z == state], y[z == state])
                 for x, y, z in zip(xs, ys, zs)])
        self._clear_caches()

    def resample_emission_distns(self):
        # Emissions regress y_t on [x_t, u_t]; either one shared distribution
        # over all timesteps or one per discrete state.
        if self._single_emission:
            data = [(np.hstack((s.gaussian_states, s.inputs)), s.data)
                    for s in self.states_list]
            mask = [s.mask for s in self.states_list] if self.has_missing_data else None
            if self.has_missing_data:
                self._emission_distn.resample(data=data, mask=mask)
            else:
                self._emission_distn.resample(data=data)
        else:
            for state, d in enumerate(self.emission_distns):
                data = [(np.hstack((s.gaussian_states[s.stateseq == state],
                                    s.inputs[s.stateseq == state])),
                         s.data[s.stateseq == state])
                        for s in self.states_list]
                mask = [s.mask[s.stateseq == state] for s in self.states_list] \
                    if self.has_missing_data else None
                if self.has_missing_data:
                    d.resample(data=data, mask=mask)
                else:
                    d.resample(data=data)
        self._clear_caches()

    def resample_obs_distns(self):
        pass  # handled in resample_parameters

    ### joblib parallel

    def _joblib_resample_states(self, states_list, num_procs):
        """Resample the (discrete, continuous) states of each sequence in
        parallel worker processes via joblib."""
        from joblib import Parallel, delayed
        import pyslds.parallel as parallel

        if len(states_list) > 0:
            joblib_args = list(map(self._get_joblib_pair, states_list))

            # Workers pick up the model and their argument shard from the
            # module-level globals in pyslds.parallel.
            parallel.model = self
            parallel.args = list_split(joblib_args, num_procs)

            idxs = range(len(parallel.args))
            raw_stateseqs = Parallel(n_jobs=num_procs, backend='multiprocessing')\
                (list(map(delayed(parallel._get_sampled_stateseq), idxs)))

            flatten = lambda lst: [x for y in lst for x in y]
            raw_stateseqs = flatten(raw_stateseqs)

            # since list_split might reorder things, do the same to states_list
            states_list = flatten(list_split(states_list, num_procs))

            for s, tup in zip(states_list, raw_stateseqs):
                s.stateseq, s.gaussian_states, s._normalizer = tup
class _SLDSVBEMMixin(_SLDSMixin):
    """Variational-Bayes EM: E-step over the states, MAP M-step over the
    parameters using the expected sufficient statistics."""

    def _vb_E_step(self):
        # update the variational approximation for the states
        for state in self.states_list:
            state.vb_E_step()

    def _vb_M_step(self):
        # Update the HMM parameters
        self._M_step_init_state_distn()
        self._M_step_trans_distn()

        # Update the LDS parameters
        self._M_step_init_dynamics_distn()
        self._M_step_dynamics_distn()
        self._M_step_emission_distn()

    def _M_step_init_dynamics_distn(self):
        # Weight each sequence's initial-state statistics by the posterior
        # probability of starting in discrete state i.
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_init_stats = lambda i, s: \
            tuple(s.expected_states[0, i] * stat for stat in s.E_init_stats)

        for state, d in enumerate(self.init_dynamics_distns):
            gaussian_map_estimation(
                sum_tuples(E_init_stats(state, s) for s in self.states_list), d)

    def _M_step_dynamics_distn(self):
        # NOTE(review): transition statistics are weighted by
        # expected_states[:-1, i] here, while the mean-field update below
        # uses expected_states[1:, i] -- confirm which time index is meant
        # to own the t -> t+1 transition.
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_dyn_stats = lambda i, s: \
            tuple(contract(s.expected_states[:-1, i], stat) for stat in s.E_dynamics_stats)

        for state, d in enumerate(self.dynamics_distns):
            regression_map_estimation(
                sum_tuples(E_dyn_stats(state, s) for s in self.states_list), d)

    def _M_step_emission_distn(self):
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))

        if self._single_emission:
            # Shared emission distribution: pool statistics over all
            # timesteps of all sequences.
            E_emi_stats = lambda s: \
                tuple(np.sum(stat, axis=0) for stat in s.E_emission_stats)
            stats = sum_tuples(E_emi_stats(s) for s in self.states_list)
            regression_map_estimation(stats, self._emission_distn)
        else:
            # Per-state emissions: weight by the posterior state marginals.
            E_emi_stats = lambda i, s: \
                tuple(contract(s.expected_states[:, i], stat) for stat in s.E_emission_stats)
            for state, d in enumerate(self.emission_distns):
                regression_map_estimation(
                    sum_tuples(E_emi_stats(state, s) for s in self.states_list), d)

    def VBEM_step(self, n_iter=1):
        """Run `n_iter` full (E, M) iterations."""
        for _ in range(n_iter):
            self._vb_E_step()
            self._vb_M_step()

    def VBEM_ELBO(self):
        """Evidence lower bound: log-prior of the parameters plus the states'
        expected complete-data log likelihood."""
        # log p(theta)
        # todo: include transition distribution and init state distribution!
        elbo = np.sum([gaussian_logprior(id) for id in self.init_dynamics_distns])
        elbo += np.sum([regression_logprior(dd) for dd in self.dynamics_distns])
        if self._single_emission:
            elbo += regression_logprior(self.emission_distns[0])
        else:
            elbo += np.sum([regression_logprior(ed) for ed in self.emission_distns])

        # E_q [log p(z, x, y, theta)]
        elbo += sum(s.vb_elbo() for s in self.states_list)
        return elbo
class _SLDSMeanFieldMixin(_SLDSMixin):
    """Structured mean-field updates for the SLDS parameter factors."""

    def meanfield_update_parameters(self):
        self.meanfield_update_init_dynamics_distns()
        self.meanfield_update_dynamics_distns()
        self.meanfield_update_emission_distns()
        super(_SLDSMeanFieldMixin, self).meanfield_update_parameters()

    def meanfield_update_init_dynamics_distns(self):
        # Weight each sequence's initial-state statistics by the posterior
        # probability of starting in discrete state i.
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_stats = lambda i, s: \
            tuple(s.expected_states[0, i] * stat for stat in s.E_init_stats)

        for state, d in enumerate(self.init_dynamics_distns):
            d.meanfieldupdate(
                stats=sum_tuples(E_stats(state, s) for s in self.states_list))

    def meanfield_update_dynamics_distns(self):
        # NOTE(review): weights use expected_states[1:, i] here, but the
        # VBEM M-step uses expected_states[:-1, i] -- confirm the intended
        # alignment of discrete state and transition.
        contract = partial(np.tensordot, axes=1)
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        E_stats = lambda i, s: \
            tuple(contract(s.expected_states[1:, i], stat) for stat in s.E_dynamics_stats)

        for state, d in enumerate(self.dynamics_distns):
            d.meanfieldupdate(
                stats=sum_tuples(E_stats(state, s) for s in self.states_list))

    def meanfield_update_emission_distns(self):
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        if self._single_emission:
            # Shared emissions: pool statistics over all timesteps.
            E_stats = lambda s: \
                tuple(np.sum(stat, axis=0) for stat in s.E_emission_stats)
            self._emission_distn.meanfieldupdate(
                stats=sum_tuples(E_stats(s) for s in self.states_list))
        else:
            # Per-state emissions: weight by the posterior state marginals.
            contract = partial(np.tensordot, axes=1)
            E_stats = lambda i, s: \
                tuple(contract(s.expected_states[:, i], stat) for stat in s.E_emission_stats)
            for state, d in enumerate(self.emission_distns):
                d.meanfieldupdate(
                    stats=sum_tuples(E_stats(state, s) for s in self.states_list))

    def meanfield_update_obs_distns(self):
        pass  # handled in meanfield_update_parameters

    ### init

    def _init_mf_from_gibbs(self):
        # Seed the variational factors from the current Gibbs sample.
        # Now also update the emission and dynamics params
        for ed in self.emission_distns:
            if hasattr(ed, "_initialize_mean_field"):
                ed._initialize_mean_field()
        for dd in self.dynamics_distns:
            if hasattr(dd, "_initialize_mean_field"):
                dd._initialize_mean_field()

        for s in self.states_list:
            s._init_mf_from_gibbs()

    ### vlb

    def vlb(self, states_last_updated=False):
        """Variational lower bound: state terms plus the KL-style terms of
        every parameter factor."""
        vlb = 0.
        vlb += sum(s.get_vlb(states_last_updated) for s in self.states_list)
        vlb += self.trans_distn.get_vlb()
        vlb += self.init_state_distn.get_vlb()
        vlb += sum(d.get_vlb() for d in self.init_dynamics_distns)
        vlb += sum(d.get_vlb() for d in self.dynamics_distns)
        if self._single_emission:
            vlb += self._emission_distn.get_vlb()
        else:
            vlb += sum(d.get_vlb() for d in self.emission_distns)
        return vlb
class HMMSLDSPython(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HMMPython):
    """HMM-switched linear dynamical system, pure-Python pyhsmm backend."""
    _states_class = HMMSLDSStatesPython
class HMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HMM):
    """HMM-switched linear dynamical system (Eigen-backed states)."""
    _states_class = HMMSLDSStatesEigen
class HSMMSLDSPython(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HSMMPython):
    """HSMM-switched linear dynamical system, pure-Python pyhsmm backend."""
    _states_class = HSMMSLDSStatesPython
class HSMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin, pyhsmm.models.HSMM):
    """HSMM-switched linear dynamical system (Eigen-backed states)."""
    _states_class = HSMMSLDSStatesEigen
class WeakLimitHDPHMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
                          pyhsmm.models.WeakLimitHDPHMM):
    """SLDS with weak-limit HDP-HMM discrete dynamics."""
    _states_class = HMMSLDSStatesEigen
class WeakLimitStickyHDPHMMSLDS(
        _SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
        pyhsmm.models.WeakLimitStickyHDPHMM):
    """SLDS with sticky weak-limit HDP-HMM discrete dynamics."""
    _states_class = HMMSLDSStatesEigen
class WeakLimitHDPHSMMSLDS(_SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin,
                           pyhsmm.models.WeakLimitHDPHSMM):
    """SLDS with weak-limit HDP-HSMM discrete dynamics."""
    _states_class = HSMMSLDSStatesEigen
## Default constructors
def _default_model(model_class, K, D_obs, D_latent, D_input=0,
                   mu_inits=None, sigma_inits=None,
                   As=None, Bs=None, sigma_statess=None,
                   Cs=None, Ds=None, sigma_obss=None,
                   alpha=3.0, init_state_distn='uniform',
                   **kwargs):
    """Construct an SLDS of type ``model_class`` with default priors.

    Parameters
    ----------
    model_class : model constructor, e.g. ``HMMSLDS``.
    K : int, number of discrete states.
    D_obs, D_latent, D_input : observation / latent / exogenous-input dims.
    mu_inits, sigma_inits : optional length-K lists of initial-state
        Gaussian means and covariances.
    As, Bs, sigma_statess : optional length-K lists of dynamics matrices,
        input matrices (required when ``D_input > 0`` and ``As`` is given),
        and dynamics noise covariances.
    Cs, Ds, sigma_obss : emission matrices, input-to-observation matrices,
        and observation noise.  If ``Cs`` is a single array (not a list),
        one emission distribution is shared by all discrete states.
    alpha : float, transition prior concentration.
    init_state_distn : spec for the initial discrete-state distribution.
    **kwargs : forwarded verbatim to ``model_class``.

    Returns
    -------
    An instance of ``model_class``.
    """
    # Initialize init_dynamics_distns: one Gaussian per discrete state.
    init_dynamics_distns = \
        [Gaussian(nu_0=D_latent+3,
                  sigma_0=3.*np.eye(D_latent),
                  mu_0=np.zeros(D_latent),
                  kappa_0=0.01)
         for _ in range(K)]

    if mu_inits is not None:
        assert isinstance(mu_inits, list) and len(mu_inits) == K
        # Renamed from `id` to avoid shadowing the builtin.
        for init_distn, mu in zip(init_dynamics_distns, mu_inits):
            init_distn.mu = mu

    if sigma_inits is not None:
        assert isinstance(sigma_inits, list) and len(sigma_inits) == K
        for init_distn, sigma in zip(init_dynamics_distns, sigma_inits):
            init_distn.sigma = sigma

    # Initialize dynamics distributions: one regression per discrete state,
    # with a near-identity prior mean on the latent-to-latent block.
    dynamics_distns = [Regression(
        nu_0=D_latent + 1,
        S_0=D_latent * np.eye(D_latent),
        M_0=np.hstack((.99 * np.eye(D_latent), np.zeros((D_latent, D_input)))),
        K_0=D_latent * np.eye(D_latent + D_input))
        for _ in range(K)]

    if As is not None:
        assert isinstance(As, list) and len(As) == K
        if D_input > 0:
            assert isinstance(Bs, list) and len(Bs) == K
            As = [np.hstack((A, B)) for A, B in zip(As, Bs)]
    else:
        # Default to identity dynamics with no input effect.
        # As = [random_rotation(D_latent) for _ in range(K)]
        As = [np.eye(D_latent) for _ in range(K)]
        if D_input > 0:
            As = [np.hstack((A, np.zeros((D_latent, D_input))))
                  for A in As]

    for dd, A in zip(dynamics_distns, As):
        dd.A = A

    if sigma_statess is not None:
        assert isinstance(sigma_statess, list) and len(sigma_statess) == K
    else:
        sigma_statess = [np.eye(D_latent) for _ in range(K)]

    for dd, sigma in zip(dynamics_distns, sigma_statess):
        dd.sigma = sigma

    # Initialize emission distributions: a single array Cs means one shared
    # emission distribution; a list (or None) means one per state.
    _single_emission = (Cs is not None) and (not isinstance(Cs, list))
    if _single_emission:
        if D_input > 0:
            assert Ds is not None and not isinstance(Ds, list)
            Cs = np.hstack((Cs, Ds))
        if sigma_obss is None:
            sigma_obss = np.eye(D_obs)
        emission_distns = Regression(
            nu_0=D_obs + 3,
            S_0=D_obs * np.eye(D_obs),
            M_0=np.zeros((D_obs, D_latent + D_input)),
            K_0=D_obs * np.eye(D_latent + D_input),
            A=Cs, sigma=sigma_obss)
    else:
        # NOTE(review): nu_0 is D_obs + 1 here but D_obs + 3 in the shared
        # branch above -- looks inconsistent; confirm before unifying.
        emission_distns = [Regression(
            nu_0=D_obs + 1,
            S_0=D_obs * np.eye(D_obs),
            M_0=np.zeros((D_obs, D_latent + D_input)),
            K_0=D_obs * np.eye(D_latent + D_input))
            for _ in range(K)]

        # NOTE(review): a user-supplied Cs is silently discarded unless
        # sigma_obss is also given (and vice versa) -- confirm intended.
        if Cs is not None and sigma_obss is not None:
            assert isinstance(Cs, list) and len(Cs) == K
            assert isinstance(sigma_obss, list) and len(sigma_obss) == K
            if D_input > 0:
                assert isinstance(Ds, list) and len(Ds) == K
                Cs = [np.hstack((C, D)) for C, D in zip(Cs, Ds)]
        else:
            Cs = [np.zeros((D_obs, D_latent + D_input)) for _ in range(K)]
            sigma_obss = [0.05 * np.eye(D_obs) for _ in range(K)]

        for ed, C, sigma in zip(emission_distns, Cs, sigma_obss):
            ed.A = C
            ed.sigma = sigma

    model = model_class(
        init_dynamics_distns=init_dynamics_distns,
        dynamics_distns=dynamics_distns,
        emission_distns=emission_distns,
        init_state_distn=init_state_distn,
        alpha=alpha,
        **kwargs)

    return model
def DefaultSLDS(K, D_obs, D_latent, D_input=0,
                mu_inits=None, sigma_inits=None,
                As=None, Bs=None, sigma_statess=None,
                Cs=None, Ds=None, sigma_obss=None,
                alpha=3.,
                **kwargs):
    """Convenience constructor: an ``HMMSLDS`` built by ``_default_model``."""
    options = dict(
        D_input=D_input,
        mu_inits=mu_inits, sigma_inits=sigma_inits,
        As=As, Bs=Bs, sigma_statess=sigma_statess,
        Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
        alpha=alpha)
    # Extra keyword arguments cannot collide with the named options above
    # (they would have bound to the named parameters), so this is safe.
    options.update(kwargs)
    return _default_model(HMMSLDS, K, D_obs, D_latent, **options)
def DefaultWeakLimitHDPSLDS(K, D_obs, D_latent, D_input=0,
                            mu_inits=None, sigma_inits=None,
                            As=None, Bs=None, sigma_statess=None,
                            Cs=None, Ds=None, sigma_obss=None,
                            alpha=3., gamma=3.,
                            **kwargs):
    """Convenience constructor: a ``WeakLimitHDPHMMSLDS`` with default
    priors; ``gamma`` is the top-level HDP concentration."""
    return _default_model(WeakLimitHDPHMMSLDS, K, D_obs, D_latent, D_input=D_input,
                          mu_inits=mu_inits, sigma_inits=sigma_inits,
                          As=As, Bs=Bs, sigma_statess=sigma_statess,
                          Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
                          alpha=alpha, gamma=gamma,
                          **kwargs)
def DefaultWeakLimitStickyHDPSLDS(K, D_obs, D_latent, D_input=0,
                                  mu_inits=None, sigma_inits=None,
                                  As=None, Bs=None, sigma_statess=None,
                                  Cs=None, Ds=None, sigma_obss=None,
                                  alpha=3., gamma=3., kappa=10.,
                                  **kwargs):
    """Convenience constructor: a ``WeakLimitStickyHDPHMMSLDS`` with default
    priors; ``kappa`` is the sticky self-transition bias."""
    return _default_model(WeakLimitStickyHDPHMMSLDS, K, D_obs, D_latent, D_input=D_input,
                          mu_inits=mu_inits, sigma_inits=sigma_inits,
                          As=As, Bs=Bs, sigma_statess=sigma_statess,
                          Cs=Cs, Ds=Ds, sigma_obss=sigma_obss,
                          kappa=kappa, alpha=alpha, gamma=gamma,
                          **kwargs)
class _CountSLDSMixin(_SLDSGibbsMixin):
    """Gibbs mixin for count-observation SLDS models: emission resampling
    additionally passes per-timestep Polya-Gamma auxiliary variables
    (``omega``) and an optional missing-data ``mask``."""
    def resample_emission_distns(self):
        """Resample emission parameters given latent states and omegas."""
        if self._single_emission:
            # Shared emission distribution: pool all time steps.
            data = [(np.hstack((s.gaussian_states, s.inputs)), s.data)
                    for s in self.states_list]
            mask = [s.mask for s in self.states_list] if self.has_missing_data else None
            omega = [s.omega for s in self.states_list]
            self._emission_distn.resample(data=data, mask=mask, omega=omega)
        else:
            # Per-state emissions: select the time steps assigned to each
            # discrete state via boolean indexing on stateseq.
            for state, d in enumerate(self.emission_distns):
                data = [(np.hstack((s.gaussian_states[s.stateseq == state],
                                    s.inputs[s.stateseq == state])),
                         s.data[s.stateseq == state])
                        for s in self.states_list]
                mask = [s.mask[s.stateseq == state] for s in self.states_list] \
                    if self.has_missing_data else None
                omega = [s.omega[s.stateseq == state] for s in self.states_list]
                d.resample(data=data, mask=mask, omega=omega)
        self._clear_caches()
class HMMCountSLDSPython(_CountSLDSMixin, pyhsmm.models.HMMPython):
    """Count-observation SLDS with HMM dynamics, pure-Python backend."""
    _states_class = HMMCountSLDSStatesPython
class HMMCountSLDS(_CountSLDSMixin, pyhsmm.models.HMM):
    """Count-observation SLDS with HMM dynamics (Eigen-backed states)."""
    _states_class = HMMCountSLDSStatesEigen
class HSMMCountSLDSPython(_CountSLDSMixin, pyhsmm.models.HSMMPython):
    """Count-observation SLDS with HSMM dynamics, pure-Python backend."""
    _states_class = HSMMCountSLDSStatesPython
class HSMMCountSLDS(_CountSLDSMixin, pyhsmm.models.HSMM):
    """Count-observation SLDS with HSMM dynamics (Eigen-backed states)."""
    _states_class = HSMMCountSLDSStatesEigen
class WeakLimitHDPHMMCountSLDS(_CountSLDSMixin, pyhsmm.models.WeakLimitHDPHMM):
    """Count-observation SLDS with weak-limit HDP-HMM dynamics."""
    _states_class = HMMCountSLDSStatesEigen
class WeakLimitStickyHDPHMMCountSLDS(
        _CountSLDSMixin, pyhsmm.models.WeakLimitStickyHDPHMM):
    """Count-observation SLDS with sticky weak-limit HDP-HMM dynamics."""
    _states_class = HMMCountSLDSStatesEigen
class WeakLimitHDPHSMMCountSLDS(
        _CountSLDSMixin, pyhsmm.models.WeakLimitHDPHSMM):
    """Count-observation SLDS with weak-limit HDP-HSMM dynamics."""
    _states_class = HSMMCountSLDSStatesEigen
| mit |
nihilus/diaphora | pygments/lexers/tcl.py | 47 | 5398 | # -*- coding: utf-8 -*-
"""
pygments.lexers.tcl
~~~~~~~~~~~~~~~~~~~
Lexers for Tcl and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
from pygments.util import shebang_matches
__all__ = ['TclLexer']
class TclLexer(RegexLexer):
    """
    For Tcl source code.
    .. versionadded:: 0.10
    """
    # Core language / control-flow command words.
    keyword_cmds_re = words((
        'after', 'apply', 'array', 'break', 'catch', 'continue', 'elseif', 'else', 'error',
        'eval', 'expr', 'for', 'foreach', 'global', 'if', 'namespace', 'proc', 'rename', 'return',
        'set', 'switch', 'then', 'trace', 'unset', 'update', 'uplevel', 'upvar', 'variable',
        'vwait', 'while'), prefix=r'\b', suffix=r'\b')
    # Built-in library commands.
    builtin_cmds_re = words((
        'append', 'bgerror', 'binary', 'cd', 'chan', 'clock', 'close', 'concat', 'dde', 'dict',
        'encoding', 'eof', 'exec', 'exit', 'fblocked', 'fconfigure', 'fcopy', 'file',
        'fileevent', 'flush', 'format', 'gets', 'glob', 'history', 'http', 'incr', 'info', 'interp',
        'join', 'lappend', 'lassign', 'lindex', 'linsert', 'list', 'llength', 'load', 'loadTk',
        'lrange', 'lrepeat', 'lreplace', 'lreverse', 'lsearch', 'lset', 'lsort', 'mathfunc',
        'mathop', 'memory', 'msgcat', 'open', 'package', 'pid', 'pkg::create', 'pkg_mkIndex',
        'platform', 'platform::shell', 'puts', 'pwd', 're_syntax', 'read', 'refchan',
        'regexp', 'registry', 'regsub', 'scan', 'seek', 'socket', 'source', 'split', 'string',
        'subst', 'tell', 'time', 'tm', 'unknown', 'unload'), prefix=r'\b', suffix=r'\b')
    name = 'Tcl'
    aliases = ['tcl']
    filenames = ['*.tcl', '*.rvt']
    mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
    # Class-body helper, evaluated once at class-creation time to build the
    # command rules for each bracketing context (deliberately not a method).
    def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
        return [
            (keyword_cmds_re, Keyword, 'params' + context),
            (builtin_cmds_re, Name.Builtin, 'params' + context),
            (r'([\w.-]+)', Name.Variable, 'params' + context),
            (r'#', Comment, 'comment'),
        ]
    tokens = {
        'root': [
            include('command'),
            include('basic'),
            include('data'),
            (r'\}', Keyword),  # HACK: somehow we miscounted our braces
        ],
        # One command state per bracketing context so closing delimiters
        # can pop back out of both the params and the command state.
        'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
        'command-in-brace': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-brace"),
        'command-in-bracket': _gen_command_rules(keyword_cmds_re,
                                                 builtin_cmds_re,
                                                 "-in-bracket"),
        'command-in-paren': _gen_command_rules(keyword_cmds_re,
                                               builtin_cmds_re,
                                               "-in-paren"),
        'basic': [
            (r'\(', Keyword, 'paren'),
            (r'\[', Keyword, 'bracket'),
            (r'\{', Keyword, 'brace'),
            (r'"', String.Double, 'string'),
            (r'(eq|ne|in|ni)\b', Operator.Word),
            (r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
        ],
        'data': [
            (r'\s+', Text),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'0[0-7]+', Number.Oct),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\$([\w.:-]+)', Name.Variable),
            (r'([\w.:-]+)', Text),
        ],
        # Arguments of a command; ';' or newline terminates the command.
        'params': [
            (r';', Keyword, '#pop'),
            (r'\n', Text, '#pop'),
            (r'(else|elseif|then)\b', Keyword),
            include('basic'),
            include('data'),
        ],
        # The double '#pop' leaves both the params and the enclosing
        # command-in-* state when the closing delimiter is seen.
        'params-in-brace': [
            (r'\}', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-paren': [
            (r'\)', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'params-in-bracket': [
            (r'\]', Keyword, ('#pop', '#pop')),
            include('params')
        ],
        'string': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
            (r'"', String.Double, '#pop')
        ],
        'string-square': [
            (r'\[', String.Double, 'string-square'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
            (r'\]', String.Double, '#pop')
        ],
        'brace': [
            (r'\}', Keyword, '#pop'),
            include('command-in-brace'),
            include('basic'),
            include('data'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('command-in-paren'),
            include('basic'),
            include('data'),
        ],
        'bracket': [
            (r'\]', Keyword, '#pop'),
            include('command-in-bracket'),
            include('basic'),
            include('data'),
        ],
        'comment': [
            (r'.*[^\\]\n', Comment, '#pop'),
            (r'.*\\\n', Comment),
        ],
    }
    def analyse_text(text):
        # Shebang-based detection; invoked without self by pygments.
        return shebang_matches(text, r'(tcl)')
| gpl-2.0 |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/constants.py | 141 | 4619 | from . import utils
# File-extension -> MIME-type lookup, built by inverting a
# MIME-type -> extensions mapping.
content_types = utils.invert_dict({"text/html": ["htm", "html"],
                                   "application/json": ["json"],
                                   "application/xhtml+xml": ["xht", "xhtm", "xhtml"],
                                   "application/xml": ["xml"],
                                   "application/x-xpinstall": ["xpi"],
                                   "text/javascript": ["js"],
                                   "text/css": ["css"],
                                   "text/plain": ["txt", "md"],
                                   "image/svg+xml": ["svg"],
                                   "image/gif": ["gif"],
                                   "image/jpeg": ["jpg", "jpeg"],
                                   "image/png": ["png"],
                                   "image/bmp": ["bmp"],
                                   "text/event-stream": ["event_stream"],
                                   "text/cache-manifest": ["manifest"],
                                   "video/mp4": ["mp4", "m4v"],
                                   "audio/mp4": ["m4a"],
                                   "audio/mpeg": ["mp3"],
                                   "video/webm": ["webm"],
                                   "audio/webm": ["weba"],
                                   "video/ogg": ["ogg", "ogv"],
                                   "audio/ogg": ["oga"],
                                   "audio/x-wav": ["wav"],
                                   "text/vtt": ["vtt"],})

# HTTP status code -> (reason phrase, long description) pairs.
response_codes = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),
    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),
    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),
    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),
    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
| mpl-2.0 |
cpollard1001/FreeCAD_sf_master | src/Mod/OpenSCAD/colorcodeshapes.py | 29 | 4659 | #***************************************************************************
#* *
#* Copyright (c) 2012 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# Fixed typo in the title string ("fuctions" -> "functions").
# NOTE(review): the title still says "2D helper" although this module does
# shape color coding -- looks copied from a sibling module; confirm.
__title__ = "FreeCAD OpenSCAD Workbench - 2D helper functions"
__author__ = "Sebastian Hoogen"
__url__ = ["http://www.freecadweb.org"]
'''
This Script includes python functions to find out the most basic shape type
in a compound and to change the color of shapes according to their shape type
'''
import FreeCAD
def shapedict(shapelst):
    """Map each shape's ``hashCode()`` to the shape itself.

    Uses a dict comprehension instead of dict() over a list of tuples.
    """
    return {shape.hashCode(): shape for shape in shapelst}
def shapeset(shapelst):
    """Return the set of ``hashCode()`` values of the given shapes.

    Uses a set comprehension instead of set() over a list comprehension.
    """
    return {shape.hashCode() for shape in shapelst}
def mostbasiccompound(comp):
    '''Search for the most basic shape type contained in a Compound.

    Sub-shapes owned by a higher-level shape are subtracted from each
    kind's set of hash codes, so only free-standing shapes remain; the
    lowest-level kind that still has a member wins.  Implicitly returns
    None when nothing is free-standing (callers map None to a color too).
    '''
    # Sets of hashCode()s per shape kind.
    solids=shapeset(comp.Solids)
    shells=shapeset(comp.Shells)
    faces=shapeset(comp.Faces)
    wires=shapeset(comp.Wires)
    edges=shapeset(comp.Edges)
    vertexes=shapeset(comp.Vertexes)
    #FreeCAD.Console.PrintMessage('%s\n' % (str((len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)))))
    # Discount every sub-shape that belongs to a larger shape, walking
    # the containment hierarchy from solids down to edges.
    for shape in comp.Solids:
        shells -= shapeset(shape.Shells)
        faces -= shapeset(shape.Faces)
        wires -= shapeset(shape.Wires)
        edges -= shapeset(shape.Edges)
        vertexes -= shapeset(shape.Vertexes)
    for shape in comp.Shells:
        faces -= shapeset(shape.Faces)
        wires -= shapeset(shape.Wires)
        edges -= shapeset(shape.Edges)
        vertexes -= shapeset(shape.Vertexes)
    for shape in comp.Faces:
        wires -= shapeset(shape.Wires)
        edges -= shapeset(shape.Edges)
        vertexes -= shapeset(shape.Vertexes)
    for shape in comp.Wires:
        edges -= shapeset(shape.Edges)
        vertexes -= shapeset(shape.Vertexes)
    for shape in comp.Edges:
        vertexes -= shapeset(shape.Vertexes)
    #FreeCAD.Console.PrintMessage('%s\n' % (str((len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)))))
    #return len(solids),len(shells),len(faces),len(wires),len(edges),len(vertexes)
    # Report the most basic kind that still has a free-standing member.
    if vertexes:
        return "Vertex"
    elif edges:
        return "Edge"
    elif wires:
        return "Wire"
    elif faces:
        return "Face"
    elif shells:
        return "Shell"
    elif solids:
        return "Solid"
def colorcodeshapes(objs):
    """Color the view of each document object according to its shape type.

    Null shapes are skipped, invalid shapes are shown in light red, and
    compounds are colored by their most basic contained shape type.
    """
    # All color components are floats in [0, 1].
    shapecolors={
        "Compound":(0.3,0.3,0.4),
        "CompSolid":(0.1,0.5,0.0),
        "Solid":(0.0,0.8,0.0),
        "Shell":(0.8,0.0,0.0),
        "Face":(0.6,0.6,0.0),
        "Wire":(0.1,0.1,0.1),
        "Edge":(1.0,1.0,1.0),
        # Fixed: was (8.0,8.0,8.0), out of the 0..1 range every other
        # entry uses -- evidently a typo for 0.8 gray.
        "Vertex":(0.8,0.8,0.8),
        "Shape":(0.0,0.0,1.0),
        None:(0.0,0.0,0.0)}
    for obj in objs:
        if hasattr(obj,'Shape'):
            # The original `try: ... except: raise` re-raised unchanged,
            # which is equivalent to no try block at all; removed.
            if obj.Shape.isNull():
                continue
            if not obj.Shape.isValid():
                color=(1.0,0.4,0.4)
            else:
                st=obj.Shape.ShapeType
                if st in ["Compound","CompSolid"]:
                    # Color compounds by their most basic member shape.
                    st = mostbasiccompound(obj.Shape)
                color=shapecolors[st]
            obj.ViewObject.ShapeColor = color
| lgpl-2.1 |
alexandernajafi/nifty_stuff | patrik/emacs.d/plugins/yasnippet/doc/compile-doc.py | 21 | 3784 | #!/usr/bin/python
# Compile document to HTML use docutils.
# ========================================
# Pygments syntax highlighting
# ========================================
from pygments.formatters import HtmlFormatter
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = True
# NOTE(review): HtmlFormatter was already imported just above -- this
# re-import is redundant but harmless.
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
    # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Docutils directive handler: highlight ``content`` with Pygments.

    ``arguments[0]`` names the lexer; unknown names fall back to plain
    text.  Returns a single raw-HTML docutils node.
    """
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # take an arbitrary option if more than one is given
    # NOTE(review): indexing options.keys() works only on Python 2; under
    # Python 3 dict views are not subscriptable.
    formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
# Docutils directive metadata: (required args, optional args, final-arg
# whitespace), content allowed, options limited to the declared VARIANTS.
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
# =================
# Youtube embedding
# =================
from docutils import nodes
from docutils.parsers.rst import directives
CODE = """\
<object type="application/x-shockwave-flash"
width="%(width)s"
height="%(height)s"
align="%(align)s"
class="youtube-embed"
data="http://www.youtube.com/v/%(yid)s">
<param name="movie" value="http://www.youtube.com/v/%(yid)s"></param>
<param name="wmode" value="transparent"></param>%(extra)s
</object>
"""
PARAM = """\n <param name="%s" value="%s"></param>"""
def youtube(name, args, options, content, lineno,
            contentOffset, blockText, state, stateMachine):
    """ Restructured text extension for inserting youtube embedded videos.

    ``content[0]`` is the video id; following lines may be ``key=value``
    overrides (width, height, align); any remaining pairs become extra
    <param> elements.  Returns a raw-HTML node, or None for empty content.
    """
    if len(content) == 0:
        return
    # Defaults for the embed template.
    string_vars = {
        'yid': content[0],
        'width': 425,
        'height': 344,
        'align': "right",
        'extra': ''
    }
    extra_args = content[1:]  # Because content[0] is ID
    extra_args = [ea.strip().split("=") for ea in extra_args]  # key=value
    extra_args = [ea for ea in extra_args if len(ea) == 2]  # drop bad lines
    extra_args = dict(extra_args)
    # Known keys override template defaults and are removed from the dict.
    if 'width' in extra_args:
        string_vars['width'] = extra_args.pop('width')
    if 'align' in extra_args:
        string_vars['align'] = extra_args.pop('align')
    if 'height' in extra_args:
        string_vars['height'] = extra_args.pop('height')
    # Whatever remains becomes extra <param> elements.
    if extra_args:
        params = [PARAM % (key, extra_args[key]) for key in extra_args]
        string_vars['extra'] = "".join(params)
    return [nodes.raw('', CODE % (string_vars), format='html')]
youtube.content = True
directives.register_directive('youtube', youtube)
# ========================================
# Command line processing
# ========================================
from docutils.core import publish_cmdline, default_description
# Build and run the docutils HTML writer: linked (not embedded) custom
# stylesheet and a custom page template.
description = ('Generates (X)HTML documents from standalone reStructuredText '
               'sources. ' + default_description)
overrides = {'stylesheet_path' : 'styles.css',
             'embed_stylesheet' : False,
             'template' : 'doc/template.txt'}
publish_cmdline(writer_name='html',
                description=description,
                settings_overrides=overrides)
| bsd-2-clause |
sssllliang/silverberry | lib/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
# Byte-value -> frequency-order tables, one 256-entry tuple per Cyrillic
# encoding.  Large sentinel values (252-255) appear to mark digits,
# symbols, and control bytes rather than letters -- TODO confirm against
# the chardet language-model documentation.  Machine-generated data; do
# not edit by hand.
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,  # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,  # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237,  # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,  # b0
 27,  3, 21, 28, 13,  2, 39, 19, 26,  4, 23, 11,  8, 12,  5,  1,  # c0
 15, 16,  9,  7,  6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54,  # d0
 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34,  # e0
 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70,  # f0
)

win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)

latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)

macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)

IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
  3, 37, 21, 44, 28, 58, 13, 41,  2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55,  4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
  8, 49, 12, 38,  5, 31,  1, 34, 15,244,245,246,247, 35, 16,248,
 43,  9, 45,  7, 32,  6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)

IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
# Language models for Russian text in various single-byte encodings.  Each
# model couples an encoding-specific char-to-order map with the shared
# RussianLangModel letter-sequence precedence matrix defined above; the
# single-byte charset prober consumes these dicts directly.
Koi8rModel = {
  'charToOrderMap': KOI8R_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "KOI8-R"
}

Win1251CyrillicModel = {
  'charToOrderMap': win1251_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "windows-1251"
}

Latin5CyrillicModel = {
  'charToOrderMap': latin5_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-5"
}

# NOTE: a stray trailing ";" was removed here for consistency with the
# sibling model definitions (it was a harmless empty statement).
MacCyrillicModel = {
  'charToOrderMap': macCyrillic_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "MacCyrillic"
}

Ibm866Model = {
  'charToOrderMap': IBM866_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM866"
}

Ibm855Model = {
  'charToOrderMap': IBM855_CharToOrderMap,
  'precedenceMatrix': RussianLangModel,
  'mTypicalPositiveRatio': 0.976601,
  'keepEnglishLetter': False,
  'charsetName': "IBM855"
}
# flake8: noqa
| apache-2.0 |
mitsuhiko/sentry | src/sentry/models/authprovider.py | 5 | 1606 | from __future__ import absolute_import, print_function
from bitfield import BitField
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
from sentry.db.models import (
BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
class AuthProvider(Model):
    """Per-organization single-sign-on provider configuration.

    One row per organization (the FK is unique); ``provider`` names the
    backend registered with ``sentry.auth.manager`` and ``config`` holds
    its provider-specific JSON settings.
    """
    __core__ = True

    organization = FlexibleForeignKey('sentry.Organization', unique=True)
    provider = models.CharField(max_length=128)
    config = JSONField()

    date_added = models.DateTimeField(default=timezone.now)
    # Sync interval/last-run bookkeeping; presumably in seconds — confirm
    # against the scheduler that reads sync_time.
    sync_time = BoundedPositiveIntegerField(null=True)
    last_sync = models.DateTimeField(null=True)

    # Role/access granted to members provisioned through this provider.
    default_role = BoundedPositiveIntegerField(default=50)
    default_global_access = models.BooleanField(default=True)
    # TODO(dcramer): ManyToMany has the same issue as ForeignKey and we need
    # to either write our own which works w/ BigAuto or switch this to use
    # through.
    default_teams = models.ManyToManyField('sentry.Team', blank=True)

    flags = BitField(flags=(
        ('allow_unlinked', 'Grant access to members who have not linked SSO accounts.'),
    ), default=0)

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_authprovider'

    __repr__ = sane_repr('organization_id', 'provider')

    def __unicode__(self):
        return self.provider

    def get_provider(self):
        """Instantiate the registered auth backend with this row's config."""
        # Imported lazily to avoid a circular import with sentry.auth.
        from sentry.auth import manager
        return manager.get(self.provider, **self.config)

    def get_audit_log_data(self):
        """Data recorded in the audit log when this provider changes."""
        return {
            'provider': self.provider,
            'config': self.config,
        }
| bsd-3-clause |
achang97/YouTunes | lib/python2.7/site-packages/gunicorn/reloader.py | 13 | 3598 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import os.path
import re
import sys
import time
import threading
class Reloader(threading.Thread):
    """Background thread that polls watched files for modification.

    Every ``interval`` seconds the modification time of each loaded-module
    file (plus any explicitly registered extra files) is compared against
    the last observed value; ``callback`` is invoked with the filename
    whenever a file is newer than previously seen.
    """

    def __init__(self, extra_files=None, interval=1, callback=None):
        super(Reloader, self).__init__()
        # Daemon thread so a pending sleep never blocks interpreter exit.
        # (Attribute assignment replaces the deprecated setDaemon() call.)
        self.daemon = True
        self._extra_files = set(extra_files or ())
        self._extra_files_lock = threading.RLock()
        self._interval = interval
        self._callback = callback

    def add_extra_file(self, filename):
        """Watch *filename* in addition to the loaded-module files."""
        with self._extra_files_lock:
            self._extra_files.add(filename)

    def get_files(self):
        """Return the list of files currently being watched."""
        # Map compiled .pyc/.pyo paths back to their .py sources.  The
        # getattr guard also skips modules whose __file__ is None (e.g.
        # namespace packages), which would otherwise crash re.sub().
        fnames = [
            re.sub('py[co]$', 'py', module.__file__)
            for module in list(sys.modules.values())
            if getattr(module, '__file__', None)
        ]

        with self._extra_files_lock:
            fnames.extend(self._extra_files)

        return fnames

    def run(self):
        mtimes = {}
        while True:
            for filename in self.get_files():
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # File vanished or is unreadable; retry next round.
                    continue
                old_time = mtimes.get(filename)
                if old_time is None:
                    # First sighting: record a baseline, do not notify.
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    if self._callback:
                        self._callback(filename)
            time.sleep(self._interval)
# Optional dependency probe: the event-driven reloader below is only
# usable when the third-party ``inotify`` package is importable.
try:
    from inotify.adapters import Inotify
    import inotify.constants
    has_inotify = True
except ImportError:
    has_inotify = False
class InotifyReloader():
    """Placeholder used when the ``inotify`` package is unavailable.

    Redefined below with a real implementation when ``has_inotify`` is
    true; this stub simply fails loudly on construction.
    """

    def __init__(self, callback=None):
        message = ('You must have the inotify module installed to use '
                   'the inotify reloader')
        raise ImportError(message)
if has_inotify:
    class InotifyReloader(threading.Thread):
        """Event-driven reloader: reacts to inotify filesystem events
        instead of polling mtimes."""

        # Every event kind that can indicate a source-file change.
        event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
                      | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
                      | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
                      | inotify.constants.IN_MOVED_TO)

        def __init__(self, extra_files=None, callback=None):
            super(InotifyReloader, self).__init__()
            self.setDaemon(True)
            self._callback = callback
            self._dirs = set()
            self._watcher = Inotify()
            # NOTE(review): *extra_files* is accepted but not registered
            # here, unlike the polling Reloader — confirm callers invoke
            # add_extra_file() separately.

        def add_extra_file(self, filename):
            """Watch the directory containing *filename* (inotify watches
            are per-directory)."""
            dirname = os.path.dirname(filename)

            if dirname in self._dirs:
                return

            self._watcher.add_watch(dirname, mask=self.event_mask)
            self._dirs.add(dirname)

        def get_dirs(self):
            """Return the set of directories containing loaded modules."""
            fnames = [
                os.path.dirname(re.sub('py[co]$', 'py', module.__file__))
                for module in list(sys.modules.values())
                if hasattr(module, '__file__')
            ]

            return set(fnames)

        def run(self):
            self._dirs = self.get_dirs()

            for dirname in self._dirs:
                self._watcher.add_watch(dirname, mask=self.event_mask)

            # event_gen() blocks; None entries are periodic timeouts.
            for event in self._watcher.event_gen():
                if event is None:
                    continue

                # Event tuple: (header, type_names, watch_path, filename).
                filename = event[3]

                self._callback(filename)
# Prefer the event-driven reloader when inotify is available; fall back to
# the portable polling implementation otherwise.
preferred_reloader = InotifyReloader if has_inotify else Reloader

# Name -> class mapping used by the configuration layer.
reloader_engines = {
    'auto': preferred_reloader,
    'poll': Reloader,
    'inotify': InotifyReloader,
}
| mit |
repotvsupertuga/repo | plugin.video.loganaddon/resources/lib/libraries/f4mproxy/flvlib/helpers.py | 95 | 5650 | import os
import time
import datetime
from StringIO import StringIO
from UserDict import DictMixin
class UTC(datetime.tzinfo):
    """Fixed-offset tzinfo representing Coordinated Universal Time.

    Modelled on the tzinfo example from the standard library's datetime
    documentation.
    """

    ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        # UTC is, by definition, at zero offset from itself.
        return self.ZERO

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return self.ZERO

    def tzname(self, dt):
        return "UTC"


utc = UTC()
class OrderedAttrDict(DictMixin):
    """
    A dictionary that preserves insert order and also has an attribute
    interface.

    Values can be transparently accessed and set as keys or as attributes.
    """

    def __init__(self, dict=None, **kwargs):
        # Write the two bookkeeping slots straight into __dict__ so that
        # our own __setattr__ (which stores into the mapping) is bypassed.
        self.__dict__["_order_priv_"] = []
        self.__dict__["_data_priv_"] = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    # Mapping interface

    def __setitem__(self, key, value):
        # First insertion fixes the key's position; re-assignment keeps it.
        if key not in self:
            self._order_priv_.append(key)
        self._data_priv_[key] = value

    def __getitem__(self, key):
        return self._data_priv_[key]

    def __delitem__(self, key):
        del self._data_priv_[key]
        self._order_priv_.remove(key)

    def keys(self):
        # DictMixin derives the rest of the mapping API from this method
        # (plus the item accessors above); return a copy to keep the
        # internal order list safe from callers.
        return list(self._order_priv_)

    # Attribute interface

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails: fall back to the
        # mapping and translate a miss into AttributeError as required.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError(name)

    # Equality

    def __eq__(self, other):
        # Order-sensitive comparison: walk both item iterators in lockstep,
        # so equal items in a different order compare unequal.  Objects
        # without an iteritems() interface are never equal to us.
        try:
            my_iter = self.iteritems()
            his_iter = other.iteritems()
        except AttributeError:
            return False

        my_empty = False
        his_empty = False

        while True:
            try:
                my_key, my_val = my_iter.next()
            except StopIteration:
                my_empty = True
            try:
                his_key, his_val = his_iter.next()
            except StopIteration:
                his_empty = True

            if my_empty and his_empty:
                return True
            if my_empty or his_empty:
                return False
            if (my_key, my_val) != (his_key, his_val):
                return False

    # String representation

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self)

    def __str__(self):
        return '{' + ', '.join([('%r: %r' % (key, self[key]))
                                for key in self._order_priv_]) + '}'
class ASPrettyPrinter(object):
    """Pretty printing of AS objects"""
    # NOTE: Python 2 style (classmethod assigned after each def).  All
    # rendering goes through a shared class-level StringIO buffer, so
    # pformat()/pprint() are not re-entrant or thread-safe.

    def pformat(cls, val, indent=0):
        # Entry point: render *val* into a fresh buffer and return the text.
        cls.io = StringIO()
        cls.pprint_lookup(val, indent)
        return cls.io.getvalue()
    pformat = classmethod(pformat)

    def pprint(cls, val):
        print cls.pformat(val)
    pprint = classmethod(pprint)

    def pprint_lookup(cls, val, ident):
        # Type dispatch.  Each renderer returns True when its output spans
        # multiple lines (containers use that to decide on indentation).
        if isinstance(val, basestring):
            return cls.pprint_string(val)
        if isinstance(val, (int, long, float)):
            return cls.pprint_number(val)
        if isinstance(val, datetime.datetime):
            return cls.pprint_datetime(val)
        if hasattr(val, 'iterkeys'):
            # dict interface
            return cls.pprint_dict(val, ident)
        if hasattr(val, 'append'):
            # list interface
            return cls.pprint_list(val, ident)
        # Unknown type ?
        cls.io.write("%r" % (val, ))
        return False
    pprint_lookup = classmethod(pprint_lookup)

    def pprint_string(cls, val):
        # Unicode values are rendered UTF-8 encoded with a u'' prefix.
        if isinstance(val, unicode):
            cls.io.write("u'%s'" % val.encode("UTF8"))
        else:
            cls.io.write("'%s'" % val)
        return False
    pprint_string = classmethod(pprint_string)

    def pprint_number(cls, val):
        cls.io.write(str(val))
        return False
    pprint_number = classmethod(pprint_number)

    def pprint_datetime(cls, val):
        # Render to whole-second ISO format, e.g. "2010-01-01 12:00:00".
        cls.io.write(val.replace(microsecond=0).isoformat(' '))
        return False
    pprint_datetime = classmethod(pprint_datetime)

    def pprint_dict(cls, val, indent):
        def pprint_item(k):
            # Indent continuation lines past "key: " of the current entry.
            last_pos = cls.io.tell()
            cls.io.write(repr(k))
            cls.io.write(": ")
            new_indent = indent + cls.io.tell() - last_pos + 1
            return cls.pprint_lookup(val[k], new_indent)

        cls.io.write('{')
        indented = False
        keys = list(val.iterkeys())
        if keys:
            for k in keys[:-1]:
                indented |= pprint_item(k)
                cls.io.write(",\n%s " % (" "*indent))
            indented |= pprint_item(keys[-1])
        cls.io.write('}')
        # Multi-line if more than one entry or any nested value was.
        return (len(keys) > 1) | indented
    pprint_dict = classmethod(pprint_dict)

    def pprint_list(cls, val, indent):
        last_pos = cls.io.tell()
        cls.io.write('[')
        new_indent = indent + cls.io.tell() - last_pos
        indented = False
        values = list(iter(val))
        if values:
            for v in values[:-1]:
                indented |= cls.pprint_lookup(v, new_indent)
                cls.io.write(",\n%s" % (" "*new_indent))
            indented |= cls.pprint_lookup(values[-1], new_indent)
        cls.io.write(']')
        return (len(values) > 1) | indented
    pprint_list = classmethod(pprint_list)
# Module-level convenience aliases for the pretty-printer entry points.
pformat = ASPrettyPrinter.pformat
pprint = ASPrettyPrinter.pprint
def force_remove(path):
    """Best-effort deletion: remove *path*, swallowing any OSError
    (missing file, permissions, ...)."""
    try:
        os.unlink(path)
    except OSError:
        pass
| gpl-2.0 |
ahmadRagheb/goldenHR | erpnext/patches/v5_0/recalculate_total_amount_in_jv.py | 108 | 1062 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.utils import money_in_words
def execute():
    """Backfill total_amount / total_amount_in_words on Journal Entries.

    The total is taken from a row that is either against a party or against
    a bank/cash account; the amount-in-words uses the company's default
    currency.
    """
    company_currency = dict(frappe.db.sql("select name, default_currency from `tabCompany`"))
    bank_or_cash_accounts = frappe.db.sql_list("""select name from `tabAccount`
        where account_type in ('Bank', 'Cash') and docstatus < 2""")

    for je in frappe.db.sql_list("""select name from `tabJournal Entry` where docstatus < 2"""):
        total_amount = 0
        total_amount_in_words = ""
        je_doc = frappe.get_doc('Journal Entry', je)
        for d in je_doc.get("accounts"):
            if (d.party_type and d.party) or d.account in bank_or_cash_accounts:
                # NOTE(review): a later matching row overwrites an earlier
                # one, so the *last* qualifying row wins — presumably
                # intentional; confirm against the live JV computation.
                total_amount = d.debit or d.credit
                if total_amount:
                    total_amount_in_words = money_in_words(total_amount, company_currency.get(je_doc.company))

        # Only touch documents where a qualifying amount was found.
        if total_amount:
            frappe.db.sql("""update `tabJournal Entry` set total_amount=%s, total_amount_in_words=%s
                where name = %s""", (total_amount, total_amount_in_words, je))
| gpl-3.0 |
shivakumardesai/ZendDoctrine | vendor/doctrine/orm/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the project's local Sphinx extensions (_exts/) importable.
sys.path.append(os.path.abspath('_exts'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
   u'Doctrine Project Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Default Sphinx domain; "dcorm" is a custom domain — presumably provided
# by the theme/extension setup, confirm where it is registered.
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: map a documented object to a source URL.

    Only the custom "dcorm" domain gets a (placeholder) link; everything
    else is left unlinked.
    """
    if domain != 'dcorm':
        return None
    return 'http://'
| bsd-3-clause |
minhphung171093/GreenERP_V8 | openerp/addons/payment_authorize/models/authorize.py | 201 | 6963 | # -*- coding: utf-'8' "-*-"
import hashlib
import hmac
import logging
import time
import urlparse
from openerp import api, fields, models
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_authorize.controllers.main import AuthorizeController
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class PaymentAcquirerAuthorize(models.Model):
    """Authorize.Net acquirer using the (legacy) hosted SIM payment form."""
    _inherit = 'payment.acquirer'

    def _get_authorize_urls(self, environment):
        """ Authorize URLs """
        # Production vs. sandbox endpoint for the SIM form POST.
        if environment == 'prod':
            return {'authorize_form_url': 'https://secure.authorize.net/gateway/transact.dll'}
        else:
            return {'authorize_form_url': 'https://test.authorize.net/gateway/transact.dll'}

    @api.model
    def _get_providers(self):
        # Register this acquirer in the provider selection list.
        providers = super(PaymentAcquirerAuthorize, self)._get_providers()
        providers.append(['authorize', 'Authorize.Net'])
        return providers

    authorize_login = fields.Char(string='API Login Id', required_if_provider='authorize')
    authorize_transaction_key = fields.Char(string='API Transaction Key', required_if_provider='authorize')

    def _authorize_generate_hashing(self, values):
        """Compute the x_fp_hash fingerprint: HMAC-MD5, keyed with the
        transaction key, over login^sequence^timestamp^amount^currency."""
        data = '^'.join([
            values['x_login'],
            values['x_fp_sequence'],
            values['x_fp_timestamp'],
            values['x_amount'],
            values['x_currency_code']])
        return hmac.new(str(values['x_trans_key']), data, hashlib.md5).hexdigest()

    @api.multi
    def authorize_form_generate_values(self, partner_values, tx_values):
        """Build the hidden-field values POSTed to the Authorize.Net form."""
        self.ensure_one()
        base_url = self.env['ir.config_parameter'].get_param('web.base.url')
        authorize_tx_values = dict(tx_values)
        temp_authorize_tx_values = {
            'x_login': self.authorize_login,
            'x_trans_key': self.authorize_transaction_key,
            'x_amount': str(tx_values['amount']),
            'x_show_form': 'PAYMENT_FORM',
            'x_type': 'AUTH_CAPTURE',
            'x_method': 'CC',
            # Sequence only needs to be unique per fingerprint.
            'x_fp_sequence': '%s%s' % (self.id, int(time.time())),
            'x_version': '3.1',
            'x_relay_response': 'TRUE',
            'x_fp_timestamp': str(int(time.time())),
            'x_relay_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._return_url),
            'x_cancel_url': '%s' % urlparse.urljoin(base_url, AuthorizeController._cancel_url),
            'x_currency_code': tx_values['currency'] and tx_values['currency'].name or '',
            'address': partner_values['address'],
            'city': partner_values['city'],
            'country': partner_values['country'] and partner_values['country'].name or '',
            'email': partner_values['email'],
            'zip': partner_values['zip'],
            'first_name': partner_values['first_name'],
            'last_name': partner_values['last_name'],
            'phone': partner_values['phone'],
            'state': partner_values.get('state') and partner_values['state'].name or '',
        }
        # The return URL travels through the gateway round-trip as custom data.
        temp_authorize_tx_values['returndata'] = authorize_tx_values.pop('return_url', '')
        # The fingerprint must be computed over the final field values.
        temp_authorize_tx_values['x_fp_hash'] = self._authorize_generate_hashing(temp_authorize_tx_values)
        authorize_tx_values.update(temp_authorize_tx_values)
        return partner_values, authorize_tx_values

    @api.multi
    def authorize_get_form_action_url(self):
        self.ensure_one()
        return self._get_authorize_urls(self.environment)['authorize_form_url']
class TxAuthorize(models.Model):
    """Payment transaction extension handling Authorize.Net form feedback."""
    _inherit = 'payment.transaction'

    authorize_txnid = fields.Char(string='Transaction ID')

    # Authorize.Net x_response_code values.
    _authorize_valid_tx_status = 1
    _authorize_pending_tx_status = 4
    _authorize_cancel_tx_status = 2

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    @api.model
    def _authorize_form_get_tx_from_data(self, data):
        """ Given a data dict coming from authorize, verify it and find the related
        transaction record. """
        reference, trans_id, fingerprint = data.get('x_invoice_num'), data.get('x_trans_id'), data.get('x_MD5_Hash')
        if not reference or not trans_id or not fingerprint:
            error_msg = 'Authorize: received data with missing reference (%s) or trans_id (%s) or fingerprint (%s)' % (reference, trans_id, fingerprint)
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.search([('reference', '=', reference)])
        # The reference must identify exactly one transaction.
        if not tx or len(tx) > 1:
            error_msg = 'Authorize: received data for reference %s' % (reference)
            if not tx:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        return tx[0]

    @api.model
    def _authorize_form_get_invalid_parameters(self, tx, data):
        """Return (name, received, expected) tuples for feedback fields that
        do not match the stored transaction."""
        invalid_parameters = []

        if tx.authorize_txnid and data.get('x_trans_id') != tx.authorize_txnid:
            invalid_parameters.append(('Transaction Id', data.get('x_trans_id'), tx.authorize_txnid))
        # check what is buyed
        if float_compare(float(data.get('x_amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('x_amount'), '%.2f' % tx.amount))

        return invalid_parameters

    @api.model
    def _authorize_form_validate(self, tx, data):
        """Update *tx* from the gateway response in *data*.

        Returns True for done/pending/cancel outcomes, False on error.
        """
        if tx.state == 'done':
            _logger.warning('Authorize: trying to validate an already validated tx (ref %s)' % tx.reference)
            return True

        status_code = int(data.get('x_response_code', '0'))
        # Values written for every outcome; previously duplicated verbatim
        # in each branch.
        vals = {
            'authorize_txnid': data.get('x_trans_id'),
            'acquirer_reference': data['x_invoice_num'],
        }
        if status_code == self._authorize_valid_tx_status:
            tx.write(dict(vals, state='done'))
            return True
        elif status_code == self._authorize_pending_tx_status:
            tx.write(dict(vals, state='pending'))
            return True
        elif status_code == self._authorize_cancel_tx_status:
            tx.write(dict(vals, state='cancel'))
            return True
        else:
            error = data.get('x_response_reason_text')
            _logger.info(error)
            tx.write(dict(vals, state='error', state_message=error))
            return False
| agpl-3.0 |
jdowner/gist | tests/test_config.py | 1 | 1307 | import configparser
import gist
import pytest
@pytest.fixture
def config():
    """Return a ConfigParser seeded with an empty ``[gist]`` section."""
    cfg = configparser.ConfigParser()
    cfg.add_section("gist")
    return cfg
def test_get_value_from_command():
    """
    Ensure that values which start with ``!`` are treated as commands and
    return the string printed to stdout by the command, otherwise ensure
    that the value passed to the function is returned.
    """
    # Whitespace around both the "!" marker and the command output is
    # expected to be stripped from the result.
    assert "magic token" == gist.client.get_value_from_command('!echo "\nmagic token"')
    assert "magic token" == gist.client.get_value_from_command(' !echo "magic token\n"')
    assert "magic token" == gist.client.get_value_from_command("magic token")


def test_get_personal_access_token_missing(config):
    # No "token" option at all -> dedicated missing-token error.
    with pytest.raises(gist.client.GistMissingTokenError):
        gist.client.get_personal_access_token(config)


@pytest.mark.parametrize("token", ["", " "])
def test_get_personal_access_token_empty(config, token):
    # A present but blank/whitespace-only token -> empty-token error.
    config.set("gist", "token", token)
    with pytest.raises(gist.client.GistEmptyTokenError):
        gist.client.get_personal_access_token(config)


@pytest.mark.parametrize("token", [" 123 ", "123abcABC0987"])
def test_get_personal_access_token_valid(config, token):
    # Valid tokens (optionally padded with whitespace) must not raise.
    config.set("gist", "token", token)
    gist.client.get_personal_access_token(config)
| mit |
lesina/Hack70 | src/content/migrations/0001_initial.py | 1 | 1381 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-20 07:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the ``content`` app: creates the
    # Document and GeneralModel tables.  Do not hand-edit applied migrations.

    initial = True

    dependencies = [
        ('courses', '0001_initial'),
        # Resolves to whatever AUTH_USER_MODEL is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('document', models.FilePathField(path='/documents')),
            ],
        ),
        migrations.CreateModel(
            name='GeneralModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='courses.Course')),
            ],
        ),
    ]
| gpl-3.0 |
toolforger/sympy | sympy/polys/domains/algebraicfield.py | 93 | 4194 | """Implementation of :class:`AlgebraicField` class. """
from __future__ import print_function, division
from sympy.polys.domains.field import Field
from sympy.polys.domains.simpledomain import SimpleDomain
from sympy.polys.domains.characteristiczero import CharacteristicZero
from sympy.polys.polyclasses import ANP
from sympy.polys.polyerrors import CoercionFailed, DomainError, NotAlgebraic, IsomorphismFailed
from sympy.utilities import public
@public
class AlgebraicField(Field, CharacteristicZero, SimpleDomain):
    """A class for representing algebraic number fields, i.e. extensions
    of QQ by one or more algebraic numbers.  Elements are ``ANP``
    instances reduced modulo the minimal polynomial of the primitive
    element of the extension. """
    # Ground representation for elements of the field.
    dtype = ANP
    is_AlgebraicField = is_Algebraic = True
    is_Numerical = True
    # An algebraic field is a field; it exposes no associated ring.
    has_assoc_Ring = False
    has_assoc_Field = True
    def __init__(self, dom, *ext):
        # ``dom`` must be QQ; ``ext`` are the generators of the extension.
        if not dom.is_QQ:
            raise DomainError("ground domain must be a rational field")
        from sympy.polys.numberfields import to_number_field
        # Keep the user-supplied generators; ``to_number_field`` collapses
        # them into a single primitive element.
        self.orig_ext = ext
        self.ext = to_number_field(ext)
        # Arithmetic is performed modulo the minimal polynomial.
        self.mod = self.ext.minpoly.rep
        self.domain = self.dom = dom
        self.ngens = 1
        self.symbols = self.gens = (self.ext,)
        # ``unit`` is the primitive element itself as an ANP.
        self.unit = self([dom(1), dom(0)])
        self.zero = self.dtype.zero(self.mod.rep, dom)
        self.one = self.dtype.one(self.mod.rep, dom)
    def new(self, element):
        """Wrap ``element`` (coefficient list) as an ANP in this field. """
        return self.dtype(element, self.mod.rep, self.dom)
    def __str__(self):
        return str(self.dom) + '<' + str(self.ext) + '>'
    def __hash__(self):
        return hash((self.__class__.__name__, self.dtype, self.dom, self.ext))
    def __eq__(self, other):
        """Returns ``True`` if two domains are equivalent. """
        return isinstance(other, AlgebraicField) and \
            self.dtype == other.dtype and self.ext == other.ext
    def algebraic_field(self, *extension):
        r"""Returns an algebraic field, i.e. `\mathbb{Q}(\alpha, \dots)`. """
        # The current primitive element is kept as a generator alongside
        # the new extension elements.
        return AlgebraicField(self.dom, *((self.ext,) + extension))
    def to_sympy(self, a):
        """Convert ``a`` to a SymPy object. """
        from sympy.polys.numberfields import AlgebraicNumber
        return AlgebraicNumber(self.ext, a).as_expr()
    def from_sympy(self, a):
        """Convert SymPy's expression to ``dtype``. """
        # Fast path: a rational constant needs no number-field arithmetic.
        try:
            return self([self.dom.from_sympy(a)])
        except CoercionFailed:
            pass
        from sympy.polys.numberfields import to_number_field
        try:
            return self(to_number_field(a, self.ext).native_coeffs())
        except (NotAlgebraic, IsomorphismFailed):
            raise CoercionFailed(
                "%s is not a valid algebraic number in %s" % (a, self))
    def from_ZZ_python(K1, a, K0):
        """Convert a Python ``int`` object to ``dtype``. """
        return K1(K1.dom.convert(a, K0))
    def from_QQ_python(K1, a, K0):
        """Convert a Python ``Fraction`` object to ``dtype``. """
        return K1(K1.dom.convert(a, K0))
    def from_ZZ_gmpy(K1, a, K0):
        """Convert a GMPY ``mpz`` object to ``dtype``. """
        return K1(K1.dom.convert(a, K0))
    def from_QQ_gmpy(K1, a, K0):
        """Convert a GMPY ``mpq`` object to ``dtype``. """
        return K1(K1.dom.convert(a, K0))
    def from_RealField(K1, a, K0):
        """Convert a mpmath ``mpf`` object to ``dtype``. """
        return K1(K1.dom.convert(a, K0))
    def get_ring(self):
        """Returns a ring associated with ``self``. """
        raise DomainError('there is no ring associated with %s' % self)
    def is_positive(self, a):
        """Returns True if ``a`` is positive. """
        return self.dom.is_positive(a.LC())
    def is_negative(self, a):
        """Returns True if ``a`` is negative. """
        return self.dom.is_negative(a.LC())
    def is_nonpositive(self, a):
        """Returns True if ``a`` is non-positive. """
        return self.dom.is_nonpositive(a.LC())
    def is_nonnegative(self, a):
        """Returns True if ``a`` is non-negative. """
        return self.dom.is_nonnegative(a.LC())
    def numer(self, a):
        """Returns numerator of ``a``. """
        return a
    def denom(self, a):
        """Returns denominator of ``a``. """
        return self.one
| bsd-3-clause |
jamestwebber/scipy | scipy/fftpack/tests/test_real_transforms.py | 2 | 24175 | from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from pytest import raises as assert_raises
from scipy.fftpack.realtransforms import (
dct, idct, dst, idst, dctn, idctn, dstn, idstn)
# Matlab reference data
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
def fftw_dct_ref(type, size, dt):
    """Return ``(input, expected, dtype)`` for DCT of the given ``type``
    and ``size`` from the precomputed FFTW reference tables."""
    sample = np.linspace(0, size - 1, size).astype(dt)
    result_dt = np.result_type(np.float32, dt)
    if result_dt == np.double:
        table = FFTWDATA_DOUBLE
    elif result_dt == np.float32:
        table = FFTWDATA_SINGLE
    else:
        raise ValueError()
    expected = table['dct_%d_%d' % (type, size)].astype(result_dt)
    return sample, expected, result_dt
def fftw_dst_ref(type, size, dt):
    """Return ``(input, expected, dtype)`` for DST of the given ``type``
    and ``size`` from the precomputed FFTW reference tables."""
    sample = np.linspace(0, size - 1, size).astype(dt)
    result_dt = np.result_type(np.float32, dt)
    if result_dt == np.double:
        table = FFTWDATA_DOUBLE
    elif result_dt == np.float32:
        table = FFTWDATA_SINGLE
    else:
        raise ValueError()
    expected = table['dst_%d_%d' % (type, size)].astype(result_dt)
    return sample, expected, result_dt
def dct_2d_ref(x, **kwargs):
    """Reference 2-D DCT: apply the 1-D ``dct`` along every row, then
    along every column (transforms are separable)."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = dct(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = dct(out[:, c], **kwargs)
    return out
def idct_2d_ref(x, **kwargs):
    """Reference 2-D inverse DCT: apply the 1-D ``idct`` along every row,
    then along every column (transforms are separable)."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = idct(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = idct(out[:, c], **kwargs)
    return out
def dst_2d_ref(x, **kwargs):
    """Reference 2-D DST: apply the 1-D ``dst`` along every row, then
    along every column (transforms are separable)."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = dst(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = dst(out[:, c], **kwargs)
    return out
def idst_2d_ref(x, **kwargs):
    """Reference 2-D inverse DST: apply the 1-D ``idst`` along every row,
    then along every column (transforms are separable)."""
    out = np.array(x, copy=True)
    n_rows, n_cols = out.shape
    for r in range(n_rows):
        out[r, :] = idst(out[r, :], **kwargs)
    for c in range(n_cols):
        out[:, c] = idst(out[:, c], **kwargs)
    return out
def naive_dct1(x, norm=None):
    """Slow textbook O(N^2) DCT-I used as a reference implementation.

    With ``norm='ortho'`` the endpoint and interior samples get the
    orthonormal weights and the first/last outputs are rescaled by
    1/sqrt(2), matching ``dct(..., type=1, norm='ortho')``.
    """
    x = np.array(x, copy=True)
    npts = len(x)
    last = npts - 1                        # DCT-I denominator M = N - 1
    if norm == 'ortho':
        edge_w = np.sqrt(1.0 / last)       # weight for the two endpoints
        mid_w = np.sqrt(2.0 / last)        # weight for interior samples
    else:
        edge_w, mid_w = 1, 2
    out = np.zeros(npts)
    for k in range(npts):
        # Endpoint contributions: x[0] always adds, x[-1] alternates sign.
        total = edge_w * x[0] + edge_w * x[last] * (1 if k % 2 == 0 else -1)
        for n in range(1, last):
            total += mid_w * x[n] * np.cos(np.pi * n * k / last)
        out[k] = total
    if norm == 'ortho':
        out[0] *= 1 / np.sqrt(2)
        out[last] *= 1 / np.sqrt(2)
    return out
def naive_dst1(x, norm=None):
    """Slow textbook O(N^2) DST-I used as a reference implementation."""
    x = np.array(x, copy=True)
    npts = len(x)
    denom = npts + 1                       # DST-I denominator M = N + 1
    out = np.zeros(npts)
    for k in range(npts):
        total = 0.0
        for n in range(npts):
            total += 2 * x[n] * np.sin(np.pi * (n + 1.0) * (k + 1.0) / denom)
        out[k] = total
    if norm == 'ortho':
        # sqrt(0.5/M) turns the doubled sum into the orthonormal form.
        out *= np.sqrt(0.5 / denom)
    return out
def naive_dct4(x, norm=None):
    """Slow textbook O(N^2) DCT-IV used as a reference implementation."""
    x = np.array(x, copy=True)
    npts = len(x)
    out = np.zeros(npts)
    for k in range(npts):
        total = 0.0
        for n in range(npts):
            total += x[n] * np.cos(np.pi * (n + 0.5) * (k + 0.5) / npts)
        out[k] = total
    # fftpack's unnormalized convention doubles the raw sum; 'ortho'
    # applies the orthonormal factor sqrt(2/N) instead.
    out *= np.sqrt(2.0 / npts) if norm == 'ortho' else 2
    return out
def naive_dst4(x, norm=None):
    """Slow textbook O(N^2) DST-IV used as a reference implementation."""
    x = np.array(x, copy=True)
    npts = len(x)
    out = np.zeros(npts)
    for k in range(npts):
        total = 0.0
        for n in range(npts):
            total += x[n] * np.sin(np.pi * (n + 0.5) * (k + 0.5) / npts)
        out[k] = total
    # fftpack's unnormalized convention doubles the raw sum; 'ortho'
    # applies the orthonormal factor sqrt(2/N) instead.
    out *= np.sqrt(2.0 / npts) if norm == 'ortho' else 2
    return out
class TestComplex(object):
    """The real transforms applied to purely imaginary input must satisfy
    transform(1j * x) == 1j * transform(x)."""
    def test_dct_complex64(self):
        actual = dct(1j*np.arange(5, dtype=np.complex64))
        expected = 1j*dct(np.arange(5))
        assert_array_almost_equal(expected, actual)
    def test_dct_complex(self):
        actual = dct(np.arange(5)*1j)
        expected = 1j*dct(np.arange(5))
        assert_array_almost_equal(expected, actual)
    def test_idct_complex(self):
        actual = idct(np.arange(5)*1j)
        expected = 1j*idct(np.arange(5))
        assert_array_almost_equal(expected, actual)
    def test_dst_complex64(self):
        actual = dst(np.arange(5, dtype=np.complex64)*1j)
        expected = 1j*dst(np.arange(5))
        assert_array_almost_equal(expected, actual)
    def test_dst_complex(self):
        actual = dst(np.arange(5)*1j)
        expected = 1j*dst(np.arange(5))
        assert_array_almost_equal(expected, actual)
    def test_idst_complex(self):
        actual = idst(np.arange(5)*1j)
        expected = 1j*idst(np.arange(5))
        assert_array_almost_equal(expected, actual)
class _TestDCTBase(object):
    """Shared DCT tests; concrete subclasses set ``rdt`` (input dtype),
    ``dec`` (decimals of agreement required) and ``type`` (DCT type 1-4)."""
    def setup_method(self):
        self.rdt = None    # input dtype
        self.dec = 14      # decimals of agreement required
        self.type = None   # DCT type (1-4)
    def test_definition(self):
        # Compare against the precomputed FFTW reference output.
        for i in FFTWDATA_SIZES:
            x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            y = dct(x, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
    def test_axis(self):
        # Transforming a 2-D array along an axis must equal transforming
        # each 1-D slice separately, for both the default and axis=0.
        nt = 2
        for i in [7, 8, 9, 16, 32, 64]:
            x = np.random.randn(nt, i)
            y = dct(x, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[j], dct(x[j], type=self.type),
                                          decimal=self.dec)
            x = x.T
            y = dct(x, axis=0, type=self.type)
            for j in range(nt):
                assert_array_almost_equal(y[:,j], dct(x[:,j], type=self.type),
                                          decimal=self.dec)
class _TestDCTIBase(_TestDCTBase):
    """DCT-I specific checks against the naive reference implementation."""
    def test_definition_ortho(self):
        # Test orthonormal mode against naive_dct1.
        for i in range(len(X)):
            x = np.array(X[i], dtype=self.rdt)
            dt = np.result_type(np.float32, self.rdt)
            y = dct(x, norm='ortho', type=1)
            y2 = naive_dct1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
    """DCT-II specific checks against MATLAB reference data."""
    def test_definition_matlab(self):
        # Test correspondence with MATLAB (orthonormal mode).
        for i in range(len(X)):
            dt = np.result_type(np.float32, self.rdt)
            x = np.array(X[i], dtype=dt)
            yr = Y[i]
            y = dct(x, norm="ortho", type=2)
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
    """DCT-III specific checks: orthonormal DCT-III inverts DCT-II."""
    def test_definition_ortho(self):
        # Test orthonormal mode: type-3 must round-trip the type-2 output.
        for i in range(len(X)):
            x = np.array(X[i], dtype=self.rdt)
            dt = np.result_type(np.float32, self.rdt)
            y = dct(x, norm='ortho', type=2)
            xi = dct(y, norm="ortho", type=3)
            assert_equal(xi.dtype, dt)
            assert_array_almost_equal(xi, x, decimal=self.dec)
class _TestDCTIVBase(_TestDCTBase):
    """DCT-IV specific checks against the naive reference implementation."""
    def test_definition_ortho(self):
        # Test orthonormal mode against naive_dct4 (note: type=4 here,
        # independent of the inherited self.type).
        for i in range(len(X)):
            x = np.array(X[i], dtype=self.rdt)
            dt = np.result_type(np.float32, self.rdt)
            y = dct(x, norm='ortho', type=4)
            y2 = naive_dct4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
# Concrete DCT test classes for types I-III: each pins down the input dtype,
# the decimal tolerance (looser for single precision and integer inputs) and
# the DCT type exercised by the shared base-class tests.
class TestDCTIDouble(_TestDCTIBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 1
class TestDCTIFloat(_TestDCTIBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestDCTIInt(_TestDCTIBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
# Concrete DCT-IV test classes.  BUG FIX: these previously set
# ``self.type = 3``, so the inherited test_definition silently re-ran the
# DCT-III comparison under a DCT-IV name (while
# _TestDCTIVBase.test_definition_ortho already hardcodes type=4).
# ``type`` must be 4 so the FFTW reference comparison exercises DCT-IV.
class TestDCTIVDouble(_TestDCTIVBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestDCTIVFloat(_TestDCTIVBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 4
class TestDCTIVInt(_TestDCTIVBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4
class _TestIDCTBase(object):
    """Shared idct() test: inverting the FFTW reference output (after
    undoing fftpack's unnormalized scaling) must recover the input."""
    def setup_method(self):
        self.rdt = None    # input dtype
        self.dec = 14      # decimals of agreement required
        self.type = None   # DCT type (1-4)
    def test_definition(self):
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
            x = idct(yr, type=self.type)
            # fftpack transforms are unnormalized: idct(dct(x)) is scaled
            # by 2*(N-1) for type 1 and 2*N for the other types.
            if self.type == 1:
                x /= 2 * (i-1)
            else:
                x /= 2 * i
            assert_equal(x.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
# Concrete inverse-DCT test classes: dtype, decimal tolerance and DCT type
# for the shared _TestIDCTBase round-trip test.
class TestIDCTIDouble(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestIDCTIInt(_TestIDCTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 4
        self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 10
        self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 3
class TestIDCTIVDouble(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestIDCTIVFloat(_TestIDCTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 5
        self.type = 4
class TestIDCTIVInt(_TestIDCTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4
class _TestDSTBase(object):
    """Shared DST test against the FFTW reference output; subclasses set
    dtype, tolerance and DST type."""
    def setup_method(self):
        self.rdt = None  # dtype
        self.dec = None  # number of decimals to match
        self.type = None  # dst type
    def test_definition(self):
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
            y = dst(xr, type=self.type)
            assert_equal(y.dtype, dt)
            # XXX: we divide by np.max(y) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
class _TestDSTIBase(_TestDSTBase):
    """DST-I specific checks against the naive reference implementation."""
    def test_definition_ortho(self):
        # Test orthonormal mode against naive_dst1.
        for i in range(len(X)):
            x = np.array(X[i], dtype=self.rdt)
            dt = np.result_type(np.float32, self.rdt)
            y = dst(x, norm='ortho', type=1)
            y2 = naive_dst1(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y / np.max(y), y2 / np.max(y), decimal=self.dec)
class _TestDSTIVBase(_TestDSTBase):
    """DST-IV specific checks against the naive reference implementation."""
    def test_definition_ortho(self):
        # Test orthonormal mode against naive_dst4.
        for i in range(len(X)):
            x = np.array(X[i], dtype=self.rdt)
            dt = np.result_type(np.float32, self.rdt)
            y = dst(x, norm='ortho', type=4)
            y2 = naive_dst4(x, norm='ortho')
            assert_equal(y.dtype, dt)
            assert_array_almost_equal(y, y2, decimal=self.dec)
# Concrete DST test classes: dtype, decimal tolerance and DST type for the
# shared base-class tests.
class TestDSTIDouble(_TestDSTIBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 1
class TestDSTIFloat(_TestDSTIBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestDSTIInt(_TestDSTIBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 1
class TestDSTIIDouble(_TestDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 2
class TestDSTIIFloat(_TestDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 2
class TestDSTIIInt(_TestDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 7
        self.type = 3
class TestDSTIIIInt(_TestDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 7
        self.type = 3
class TestDSTIVDouble(_TestDSTIVBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestDSTIVFloat(_TestDSTIVBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 4
class TestDSTIVInt(_TestDSTIVBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 5
        self.type = 4
class _TestIDSTBase(object):
    """Shared idst() test: inverting the FFTW reference output (after
    undoing fftpack's unnormalized scaling) must recover the input."""
    def setup_method(self):
        self.rdt = None    # input dtype
        self.dec = None    # decimals of agreement required
        self.type = None   # DST type (1-4)
    def test_definition(self):
        for i in FFTWDATA_SIZES:
            xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
            x = idst(yr, type=self.type)
            # fftpack transforms are unnormalized: idst(dst(x)) is scaled
            # by 2*(N+1) for type 1 and 2*N for the other types.
            if self.type == 1:
                x /= 2 * (i+1)
            else:
                x /= 2 * i
            assert_equal(x.dtype, dt)
            # XXX: we divide by np.max(x) because the tests fail otherwise. We
            # should really use something like assert_array_approx_equal. The
            # difference is due to fftw using a better algorithm w.r.t error
            # propagation compared to the ones from fftpack.
            assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
                                      err_msg="Size %d failed" % i)
# Concrete inverse-DST test classes: dtype, decimal tolerance and DST type
# for the shared _TestIDSTBase round-trip test.
class TestIDSTIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 4
        self.type = 1
class TestIDSTIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 4
        self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 14
        self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 3
class TestIDSTIVDouble(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.double
        self.dec = 12
        self.type = 4
class TestIDSTIVFloat(_TestIDSTBase):
    def setup_method(self):
        self.rdt = np.float32
        self.dec = 6
        self.type = 4
# FIX: class was misnamed ``TestIDSTIVnt`` (typo) — renamed to match the
# ``TestIDSTIV{Double,Float,Int}`` convention used by its siblings.
class TestIDSTIVInt(_TestIDSTBase):
    def setup_method(self):
        self.rdt = int
        self.dec = 6
        self.type = 4
class TestOverwrite(object):
    """Check input overwrite behavior: unless ``overwrite_x=True`` was
    requested, the input array must be left untouched."""
    real_dtypes = [np.float32, np.float64]
    def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x, **kw):
        # Run ``routine`` on a copy and verify the buffer afterwards.
        x2 = x.copy()
        routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
        sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
            routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
        if not overwrite_x:
            assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
    def _check_1d(self, routine, dtype, shape, axis):
        # Exercise all four transform types, both overwrite flags and both
        # normalizations on random data of the given dtype/shape.
        np.random.seed(1234)
        if np.issubdtype(dtype, np.complexfloating):
            data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
        else:
            data = np.random.randn(*shape)
        data = data.astype(dtype)
        for type in [1, 2, 3, 4]:
            for overwrite_x in [True, False]:
                for norm in [None, 'ortho']:
                    self._check(data, routine, type, None, axis, norm,
                                overwrite_x)
    def test_dct(self):
        for dtype in self.real_dtypes:
            self._check_1d(dct, dtype, (16,), -1)
            self._check_1d(dct, dtype, (16, 2), 0)
            self._check_1d(dct, dtype, (2, 16), 1)
    def test_idct(self):
        for dtype in self.real_dtypes:
            self._check_1d(idct, dtype, (16,), -1)
            self._check_1d(idct, dtype, (16, 2), 0)
            self._check_1d(idct, dtype, (2, 16), 1)
    def test_dst(self):
        for dtype in self.real_dtypes:
            self._check_1d(dst, dtype, (16,), -1)
            self._check_1d(dst, dtype, (16, 2), 0)
            self._check_1d(dst, dtype, (2, 16), 1)
    def test_idst(self):
        for dtype in self.real_dtypes:
            self._check_1d(idst, dtype, (16,), -1)
            self._check_1d(idst, dtype, (16, 2), 0)
            self._check_1d(idst, dtype, (2, 16), 1)
class Test_DCTN_IDCTN(object):
    """Tests for the n-dimensional transforms (dctn/idctn, dstn/idstn):
    round trips, agreement with the separable 2-D references, and
    axes/shape argument validation."""
    dec = 14
    dct_type = [1, 2, 3, 4]
    norms = [None, 'ortho']
    # Fixed seed so the shared test data is reproducible.
    rstate = np.random.RandomState(1234)
    shape = (32, 16)
    data = rstate.randn(*shape)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [None,
                                      1, (1,), [1],
                                      0, (0,), [0],
                                      (0, 1), [0, 1],
                                      (-2, -1), [-2, -1]])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', ['ortho'])
    def test_axes_round_trip(self, fforward, finverse, axes, dct_type, norm):
        # inverse(forward(x)) must reproduce x for every axes spelling.
        tmp = fforward(self.data, type=dct_type, axes=axes, norm=norm)
        tmp = finverse(tmp, type=dct_type, axes=axes, norm=norm)
        # NOTE: uses a fixed decimal=12 rather than self.dec.
        assert_array_almost_equal(self.data, tmp, decimal=12)
    @pytest.mark.parametrize('fforward,fforward_ref', [(dctn, dct_2d_ref),
                                                       (dstn, dst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', norms)
    def test_dctn_vs_2d_reference(self, fforward, fforward_ref,
                                  dct_type, norm):
        # n-D transform over both axes must equal the separable 2-D ref.
        y1 = fforward(self.data, type=dct_type, axes=None, norm=norm)
        y2 = fforward_ref(self.data, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('finverse,finverse_ref', [(idctn, idct_2d_ref),
                                                       (idstn, idst_2d_ref)])
    @pytest.mark.parametrize('dct_type', dct_type)
    @pytest.mark.parametrize('norm', [None, 'ortho'])
    def test_idctn_vs_2d_reference(self, finverse, finverse_ref,
                                   dct_type, norm):
        # Inverse n-D transform must match the separable 2-D reference.
        fdata = dctn(self.data, type=dct_type, norm=norm)
        y1 = finverse(fdata, type=dct_type, norm=norm)
        y2 = finverse_ref(fdata, type=dct_type, norm=norm)
        assert_array_almost_equal(y1, y2, decimal=11)
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    def test_axes_and_shape(self, fforward, finverse):
        # Mismatched lengths of ``shape`` and ``axes`` must raise.
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=(0, 1))
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape[0], axes=None)
        with assert_raises(ValueError,
                           match="when given, axes and shape arguments"
                           " have to be of the same length"):
            fforward(self.data, shape=self.data.shape, axes=0)
    @pytest.mark.parametrize('fforward', [dctn, dstn])
    def test_shape(self, fforward):
        # ``shape`` zero-pads/truncates the input to the requested size.
        tmp = fforward(self.data, shape=(128, 128), axes=None)
        assert_equal(tmp.shape, (128, 128))
    @pytest.mark.parametrize('fforward,finverse', [(dctn, idctn),
                                                   (dstn, idstn)])
    @pytest.mark.parametrize('axes', [1, (1,), [1],
                                      0, (0,), [0]])
    def test_shape_is_none_with_axes(self, fforward, finverse, axes):
        # shape=None with explicit axes must still round-trip exactly.
        tmp = fforward(self.data, shape=None, axes=axes, norm='ortho')
        tmp = finverse(tmp, shape=None, axes=axes, norm='ortho')
        assert_array_almost_equal(self.data, tmp, decimal=self.dec)
| bsd-3-clause |
fkarb/xltable | docs/conf.py | 3 | 7921 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# xltable documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 15 11:52:17 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'xltable'
copyright = '2013-2015, Renshaw Bay'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xltabledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'xltable.tex', 'xltable Documentation',
'Renshaw Bay', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xltable', 'xltable Documentation',
['Renshaw Bay'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xltable', 'xltable Documentation',
'Renshaw Bay', 'xltable', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
yuchangfu/pythonfun | flaskenv/Lib/encodings/mac_farsi.py | 593 | 15426 | """ Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
    """Return the CodecInfo the ``encodings`` package uses to register 'mac-farsi'."""
    return codecs.CodecInfo(
        name='mac-farsi',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE, left-right
u'!' # 0x21 -> EXCLAMATION MARK, left-right
u'"' # 0x22 -> QUOTATION MARK, left-right
u'#' # 0x23 -> NUMBER SIGN, left-right
u'$' # 0x24 -> DOLLAR SIGN, left-right
u'%' # 0x25 -> PERCENT SIGN, left-right
u'&' # 0x26 -> AMPERSAND, left-right
u"'" # 0x27 -> APOSTROPHE, left-right
u'(' # 0x28 -> LEFT PARENTHESIS, left-right
u')' # 0x29 -> RIGHT PARENTHESIS, left-right
u'*' # 0x2A -> ASTERISK, left-right
u'+' # 0x2B -> PLUS SIGN, left-right
u',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
u'-' # 0x2D -> HYPHEN-MINUS, left-right
u'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
u'/' # 0x2F -> SOLIDUS, left-right
u'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
u'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
u'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
u':' # 0x3A -> COLON, left-right
u';' # 0x3B -> SEMICOLON, left-right
u'<' # 0x3C -> LESS-THAN SIGN, left-right
u'=' # 0x3D -> EQUALS SIGN, left-right
u'>' # 0x3E -> GREATER-THAN SIGN, left-right
u'?' # 0x3F -> QUESTION MARK, left-right
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
u'\\' # 0x5C -> REVERSE SOLIDUS, left-right
u']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
u'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
u'_' # 0x5F -> LOW LINE, left-right
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET, left-right
u'|' # 0x7C -> VERTICAL LINE, left-right
u'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
u'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0x9B -> DIVISION SIGN, right-left
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u' ' # 0xA0 -> SPACE, right-left
u'!' # 0xA1 -> EXCLAMATION MARK, right-left
u'"' # 0xA2 -> QUOTATION MARK, right-left
u'#' # 0xA3 -> NUMBER SIGN, right-left
u'$' # 0xA4 -> DOLLAR SIGN, right-left
u'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
u'&' # 0xA6 -> AMPERSAND, right-left
u"'" # 0xA7 -> APOSTROPHE, right-left
u'(' # 0xA8 -> LEFT PARENTHESIS, right-left
u')' # 0xA9 -> RIGHT PARENTHESIS, right-left
u'*' # 0xAA -> ASTERISK, right-left
u'+' # 0xAB -> PLUS SIGN, right-left
u'\u060c' # 0xAC -> ARABIC COMMA
u'-' # 0xAD -> HYPHEN-MINUS, right-left
u'.' # 0xAE -> FULL STOP, right-left
u'/' # 0xAF -> SOLIDUS, right-left
u'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
u'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
u'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
u'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
u'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
u'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
u'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
u'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
u'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
u'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
u':' # 0xBA -> COLON, right-left
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'<' # 0xBC -> LESS-THAN SIGN, right-left
u'=' # 0xBD -> EQUALS SIGN, right-left
u'>' # 0xBE -> GREATER-THAN SIGN, right-left
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
u'\\' # 0xDC -> REVERSE SOLIDUS, right-left
u']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
u'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
u'_' # 0xDF -> LOW LINE, right-left
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\u067e' # 0xF3 -> ARABIC LETTER PEH
u'\u0679' # 0xF4 -> ARABIC LETTER TTEH
u'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
u'\u06d5' # 0xF6 -> ARABIC LETTER AE
u'\u06a4' # 0xF7 -> ARABIC LETTER VEH
u'\u06af' # 0xF8 -> ARABIC LETTER GAF
u'\u0688' # 0xF9 -> ARABIC LETTER DDAL
u'\u0691' # 0xFA -> ARABIC LETTER RREH
u'{' # 0xFB -> LEFT CURLY BRACKET, right-left
u'|' # 0xFC -> VERTICAL LINE, right-left
u'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
u'\u0698' # 0xFE -> ARABIC LETTER JEH
u'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
kmiddleton/dewarp_video | setupCV.py | 1 | 1758 | # -*- coding: utf-8 -*-
def setupCV(cameraMatrixFile, distortionCoefsFile):
    """
    Load camera matrix and distortion coefficients.

    Reads both matrices from comma-separated text files and converts them
    into the legacy OpenCV (cv) matrix objects expected downstream.

    Args:
        cameraMatrixFile (string): Path to saved 3x3 camera matrix file
        distortionCoefsFile (string): Path to saved 1x5 distortion
            coefficients file

    Returns:
        camera_matrix (matrix): Camera matrix in the proper format
        dist_coeffs (matrix): Distortion coefficients in the proper format
    """
    import cv
    from numpy import genfromtxt

    camMat = genfromtxt(cameraMatrixFile, delimiter=',')
    distCoefs = genfromtxt(distortionCoefsFile, delimiter=',')

    # Copy the 3x3 camera matrix element by element. A loop replaces the
    # original nine hand-written SetReal2D calls: identical behavior, no
    # copy/paste index risk.
    camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
    for row in range(3):
        for col in range(3):
            cv.SetReal2D(camera_matrix, row, col, camMat[row, col])

    # Distortion coefficients form a 1x5 row vector.
    dist_coeffs = cv.CreateMat(1, 5, cv.CV_32FC1)
    for col in range(5):
        cv.SetReal2D(dist_coeffs, 0, col, distCoefs[col])

    return [camera_matrix, dist_coeffs]
| cc0-1.0 |
richardcs/ansible | lib/ansible/plugins/lookup/nios.py | 80 | 4057 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
lookup: nios
version_added: "2.5"
short_description: Query Infoblox NIOS objects
description:
- Uses the Infoblox WAPI API to fetch NIOS specified objects. This lookup
supports adding additional keywords to filter the return data and specify
the desired set of returned fields.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
_terms:
description: The name of the object to return from NIOS
required: True
return_fields:
description: The list of field names to return for the specified object.
filter:
description: a dict object that is used to filter the return objects
extattrs:
description: a dict object that is used to filter on extattrs
"""
# NOTE: fixed an unbalanced '}' in the host_records example below
# ("...'comment']}) }}" -> "...'comment']) }}"), which made the Jinja2
# expression invalid.
EXAMPLES = """
- name: fetch all networkview objects
  set_fact:
    networkviews: "{{ lookup('nios', 'networkview', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"

- name: fetch the default dns view
  set_fact:
    dns_views: "{{ lookup('nios', 'view', filter={'name': 'default'}, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"

# all of the examples below use credentials that are set using env variables
# export INFOBLOX_HOST=nios01
# export INFOBLOX_USERNAME=admin
# export INFOBLOX_PASSWORD=admin

- name: fetch all host records and include extended attributes
  set_fact:
    host_records: "{{ lookup('nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']) }}"

- name: use env variables to pass credentials
  set_fact:
    networkviews: "{{ lookup('nios', 'networkview') }}"

- name: get a host record
  set_fact:
    host: "{{ lookup('nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"

- name: get the authoritative zone from a non default dns view
  set_fact:
    host: "{{ lookup('nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
"""
RETURN = """
obj_type:
description:
- The object type specified in the terms argument
returned: always
type: complex
contains:
obj_field:
- One or more obj_type fields as specified by return_fields argument or
the default set of fields as per the object type
"""
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.net_tools.nios.api import WapiLookup
from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
    """Lookup plugin entry point: query Infoblox NIOS via the WAPI."""

    def run(self, terms, variables=None, **kwargs):
        """Return the NIOS objects of the type named in terms[0].

        Optional kwargs: return_fields, filter, extattrs, provider.
        """
        try:
            obj_type = terms[0]
        except IndexError:
            raise AnsibleError('the object_type must be specified')

        # Pull the supported options out of kwargs; extattrs are converted
        # to the WAPI wire format before the query.
        return_fields = kwargs.pop('return_fields', None)
        filter_data = kwargs.pop('filter', {})
        extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
        provider = kwargs.pop('provider', {})

        wapi = WapiLookup(provider)
        found = wapi.get_object(obj_type, filter_data,
                                return_fields=return_fields, extattrs=extattrs)

        # No match: the lookup contract is to return an empty list.
        if found is None:
            return []

        # Flatten extattrs in place so callers see plain key/value pairs.
        for entry in found:
            if 'extattrs' in entry:
                entry['extattrs'] = flatten_extattrs(entry['extattrs'])
        return found
| gpl-3.0 |
whitzhu/fle-home | fle_site/apps/articles/migrations/0003_auto__add_field_tag_slug.py | 10 | 8057 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tag.slug'
db.add_column('articles_tag', 'slug', self.gf('django.db.models.fields.CharField')(default='', max_length=64, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Tag.slug'
db.delete_column('articles_tag', 'slug')
models = {
'articles.article': {
'Meta': {'ordering': "('-publish_date', 'title')", 'object_name': 'Article'},
'addthis_use_author': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'addthis_username': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'auto_tag': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followup_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followups'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'markup': ('django.db.models.fields.CharField', [], {'default': "'h'", 'max_length': '1'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_articles_rel_+'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'rendered_content': ('django.db.models.fields.TextField', [], {}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['articles.ArticleStatus']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['articles.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'use_addthis_button': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'articles.articlestatus': {
'Meta': {'ordering': "('ordering', 'name')", 'object_name': 'ArticleStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'articles.attachment': {
'Meta': {'ordering': "('-article', 'id')", 'object_name': 'Attachment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['articles.Article']"}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'articles.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
| mit |
kunalghosh/T-61.6050-Special-Course-in-Deep-Learning | exercises/utils/mlp.py | 1 | 1926 | import theano.tensor as T
from logistic_reg import LogisticRegression
from hiddenLayer import HiddenLayer
class MLP(object):
    """Multi-layer perceptron: a stack of tanh hidden layers followed by a
    logistic-regression output layer, built as a Theano expression graph."""

    def __init__(self, rng, inpt, layers, scale=1):
        '''
        Build the network graph.

        Args:
            rng: numpy RandomState used to initialise layer weights.
            inpt: symbolic Theano variable holding the input minibatch.
            layers: list of layer sizes, e.g. [784, 225, 144, 10].
                len(layers) must be at least 3: input -> hidden -> output,
                but it can be more as well.
            scale: weight-initialisation scale forwarded to HiddenLayer.
        '''
        self.hidden_layers = []
        old_inpt = inpt
        self.params = []
        # Pair consecutive sizes for the hidden stack: for [784,225,144,10]
        # this yields (784,225) and (225,144); the final (144,10) pair is
        # handled by the logistic-regression layer below.
        for prevLayer, layer in zip(layers[:-1],layers[1:-1]):
            hiddenLayer = HiddenLayer(
                rng = rng,
                inpt = old_inpt,
                n_in = prevLayer,
                n_out = layer,
                activation = T.tanh,
                scale = scale
            )
            # Chain the layers: this layer's activations feed the next one.
            old_inpt = hiddenLayer.output
            self.hidden_layers.append(hiddenLayer)
            self.params.extend(hiddenLayer.params)
        # Output layer maps the last hidden size (layers[-2]) to the number
        # of classes (layers[-1]).
        self.logisticRegressionLayer = LogisticRegression(
            inpt = self.hidden_layers[-1].output,
            n_in = layers[-2],
            n_out = layers[-1]
        )
        # L2 penalty over every weight matrix in the network (hidden + output).
        self.L2_sqr = (
            sum([(hiddenLayer.W ** 2).sum() for hiddenLayer in self.hidden_layers])
            + (self.logisticRegressionLayer.W ** 2).sum()
        )
        # Expose the output layer's cost and error helpers on the MLP itself.
        self.negative_log_likelihood = (
            self.logisticRegressionLayer.negative_log_likelihood
        )
        self.errors = self.logisticRegressionLayer.get_error
        self.params.extend(self.logisticRegressionLayer.params)
| mit |
rpmcpp/Audacity | lib-src/lv2/sord/waflib/Tools/python.py | 61 | 13210 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys
from waflib import Utils,Options,Errors,Logs
from waflib.TaskGen import extension,before_method,after_method,feature
from waflib.Configure import conf
FRAG='''
#include <Python.h>
#ifdef __cplusplus
extern "C" {
#endif
void Py_Initialize(void);
void Py_Finalize(void);
#ifdef __cplusplus
}
#endif
int main(int argc, char **argv)
{
(void)argc; (void)argv;
Py_Initialize();
Py_Finalize();
return 0;
}
'''
INST='''
import sys, py_compile
py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3])
'''
DISTUTILS_IMP=['from distutils.sysconfig import get_config_var, get_python_lib']
@extension('.py')
def process_py(self,node):
    """Task-gen hook for ``.py`` sources: schedule installation (and
    byte-compilation) of the file once the build is done."""
    # Outside an install run there is nothing to do; during configuration
    # the attribute may not exist at all, hence the broad guard.
    try:
        if not self.bld.is_install:
            return
    except AttributeError:
        return
    # Default destination: the standard python site directory.
    try:
        if not self.install_path:
            return
    except AttributeError:
        self.install_path='${PYTHONDIR}'
    def inst_py(ctx):
        # Deferred to install time so that the optional 'install_from'
        # source root (used to compute the relative path) is resolved late.
        install_from=getattr(self,'install_from',None)
        if install_from:
            install_from=self.path.find_dir(install_from)
        install_pyfile(self,node,install_from)
    self.bld.add_post_fun(inst_py)
def install_pyfile(self,node,install_from=None):
    """Install the python file *node* and byte-compile it to .pyc/.pyo.

    On uninstall the compiled companions are removed as well. The .pyc/.pyo
    is only regenerated when it is older than the installed source.
    """
    from_node=install_from or node.parent
    tsk=self.bld.install_as(self.install_path+'/'+node.path_from(from_node),node,postpone=False)
    path=tsk.get_install_path()
    if self.bld.is_install<0:
        # Uninstall: drop the byte-compiled companions of the removed source.
        Logs.info("+ removing byte compiled python files")
        for x in'co':
            try:
                os.remove(path+x)
            except OSError:
                pass
    if self.bld.is_install>0:
        try:
            st1=os.stat(path)
        except OSError:
            # BUGFIX: the original only logged and fell through with 'st1'
            # unbound, crashing with a NameError in the loop below; bail out.
            Logs.error('The python file is missing, this should not happen')
            return
        for x in['c','o']:
            do_inst=self.env['PY'+x.upper()]
            try:
                st2=os.stat(path+x)
            except OSError:
                pass
            else:
                # Byte-code already newer than the source: skip recompilation.
                if st1.st_mtime<=st2.st_mtime:
                    do_inst=False
            if do_inst:
                # .pyo builds get the optimisation flag (e.g. -O).
                lst=(x=='o')and[self.env['PYFLAGS_OPT']]or[]
                (a,b,c)=(path,path+x,tsk.get_install_path(destdir=False)+x)
                argv=self.env['PYTHON']+lst+['-c',INST,a,b,c]
                Logs.info('+ byte compiling %r'%(path+x))
                env=self.env.env or None
                ret=Utils.subprocess.Popen(argv,env=env).wait()
                if ret:
                    raise Errors.WafError('py%s compilation failed %r'%(x,path))
@feature('py')
def feature_py(self):
    """Register the 'py' feature name; the actual work is done by the
    '.py' extension hook (process_py) above."""
    pass
@feature('pyext')
@before_method('propagate_uselib_vars','apply_link')
@after_method('apply_bundle')
def init_pyext(self):
    """Prepare a task generator that builds a python extension module."""
    self.uselib=self.to_list(getattr(self,'uselib',[]))
    if not'PYEXT'in self.uselib:
        self.uselib.append('PYEXT')
    # Extension modules must use the interpreter's shared-object naming
    # pattern regardless of the source language (c, c++, fortran, d).
    self.env.cshlib_PATTERN=self.env.cxxshlib_PATTERN=self.env.macbundle_PATTERN=self.env.pyext_PATTERN
    self.env.fcshlib_PATTERN=self.env.dshlib_PATTERN=self.env.pyext_PATTERN
    # Default install destination: the platform-specific site-packages dir.
    try:
        if not self.install_path:
            return
    except AttributeError:
        self.install_path='${PYTHONARCHDIR}'
@feature('pyext')
@before_method('apply_link','apply_bundle')
def set_bundle(self):
    """On OSX, build python extensions as mac bundles."""
    if Utils.unversioned_sys_platform()=='darwin':
        self.mac_bundle=True
@before_method('propagate_uselib_vars')
@feature('pyembed')
def init_pyembed(self):
    """Add the PYEMBED uselib variable to programs embedding the interpreter."""
    self.uselib=self.to_list(getattr(self,'uselib',[]))
    if not'PYEMBED'in self.uselib:
        self.uselib.append('PYEMBED')
@conf
def get_python_variables(self,variables,imports=None):
    """Run the configured python interpreter and return the values of
    *variables* (expressions evaluated after executing *imports*,
    defaulting to the distutils.sysconfig helpers)."""
    if not imports:
        try:
            imports=self.python_imports
        except AttributeError:
            imports=DISTUTILS_IMP
    # Build a small program that prints repr() of each requested expression.
    program=list(imports)
    program.append('')
    for v in variables:
        program.append("print(repr(%s))"%v)
    os_env=dict(os.environ)
    try:
        # This variable can confuse the child interpreter on OSX.
        del os_env['MACOSX_DEPLOYMENT_TARGET']
    except KeyError:
        pass
    try:
        out=self.cmd_and_log(self.env.PYTHON+['-c','\n'.join(program)],env=os_env)
    except Errors.WafError:
        self.fatal('The distutils module is unusable: install "python-devel"?')
    self.to_log(out)
    # Parse the repr() output back into python values: None, quoted
    # strings (via eval) and integers; anything else stops the parse.
    return_values=[]
    for s in out.split('\n'):
        s=s.strip()
        if not s:
            continue
        if s=='None':
            return_values.append(None)
        elif(s[0]=="'"and s[-1]=="'")or(s[0]=='"'and s[-1]=='"'):
            return_values.append(eval(s))
        elif s[0].isdigit():
            return_values.append(int(s))
        else:break
    return return_values
@conf
def check_python_headers(conf):
    """Check for the headers and libraries needed to extend or embed python.

    On success the PYEXT and PYEMBED uselib variables are populated with
    includes, library paths, libraries and compile/link flags.
    """
    env=conf.env
    if not env['CC_NAME']and not env['CXX_NAME']:
        conf.fatal('load a compiler first (gcc, g++, ..)')
    if not env['PYTHON_VERSION']:
        conf.check_python_version()
    pybin=conf.env.PYTHON
    if not pybin:
        conf.fatal('Could not find the python executable')
    # Query the distutils build configuration of the target interpreter.
    v='prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS'.split()
    try:
        lst=conf.get_python_variables(["get_config_var('%s') or ''"%x for x in v])
    except RuntimeError:
        conf.fatal("Python development headers not found (-v for details).")
    vals=['%s = %r'%(x,y)for(x,y)in zip(v,lst)]
    conf.to_log("Configuration returned from %r:\n%r\n"%(pybin,'\n'.join(vals)))
    dct=dict(zip(v,lst))
    x='MACOSX_DEPLOYMENT_TARGET'
    if dct[x]:
        conf.env[x]=conf.environ[x]=dct[x]
    # Shared-object suffix for extension modules (e.g. '.so', '.pyd').
    env['pyext_PATTERN']='%s'+dct['SO']
    # Flags for embedding (programs linking the interpreter)...
    all_flags=dct['LDFLAGS']+' '+dct['CFLAGS']
    conf.parse_flags(all_flags,'PYEMBED')
    # ...and for building extension modules.
    all_flags=dct['LDFLAGS']+' '+dct['LDSHARED']+' '+dct['CFLAGS']
    conf.parse_flags(all_flags,'PYEXT')
    result=None
    # Look for libpythonX.Y (or pythonXY on win32) in decreasing order of
    # reliability: LIBPATH_PYEMBED, LIBDIR, LIBPL, then $prefix/libs.
    for name in('python'+env['PYTHON_VERSION'],'python'+env['PYTHON_VERSION'].replace('.','')):
        if not result and env['LIBPATH_PYEMBED']:
            path=env['LIBPATH_PYEMBED']
            conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n"%path)
            result=conf.check(lib=name,uselib='PYEMBED',libpath=path,mandatory=False,msg='Checking for library %s in LIBPATH_PYEMBED'%name)
        if not result and dct['LIBDIR']:
            path=[dct['LIBDIR']]
            conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n"%path)
            result=conf.check(lib=name,uselib='PYEMBED',libpath=path,mandatory=False,msg='Checking for library %s in LIBDIR'%name)
        if not result and dct['LIBPL']:
            path=[dct['LIBPL']]
            conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
            result=conf.check(lib=name,uselib='PYEMBED',libpath=path,mandatory=False,msg='Checking for library %s in python_LIBPL'%name)
        if not result:
            path=[os.path.join(dct['prefix'],"libs")]
            conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
            result=conf.check(lib=name,uselib='PYEMBED',libpath=path,mandatory=False,msg='Checking for library %s in $prefix/libs'%name)
        if result:
            break
    if result:
        env['LIBPATH_PYEMBED']=path
        env.append_value('LIB_PYEMBED',[name])
    else:
        conf.to_log("\n\n### LIB NOT FOUND\n")
    # When the interpreter uses a shared libpython, extensions link against
    # the same library as embedded programs.
    if(Utils.is_win32 or sys.platform.startswith('os2')or dct['Py_ENABLE_SHARED']):
        env['LIBPATH_PYEXT']=env['LIBPATH_PYEMBED']
        env['LIB_PYEXT']=env['LIB_PYEMBED']
    num='.'.join(env['PYTHON_VERSION'].split('.')[:2])
    conf.find_program([''.join(pybin)+'-config','python%s-config'%num,'python-config-%s'%num,'python%sm-config'%num],var='PYTHON_CONFIG',mandatory=False)
    includes=[]
    if conf.env.PYTHON_CONFIG:
        # python-config is authoritative for the include directories.
        for incstr in conf.cmd_and_log([conf.env.PYTHON_CONFIG,'--includes']).strip().split():
            if(incstr.startswith('-I')or incstr.startswith('/I')):
                incstr=incstr[2:]
            if incstr not in includes:
                includes.append(incstr)
        conf.to_log("Include path for Python extensions (found via python-config --includes): %r\n"%(includes,))
        env['INCLUDES_PYEXT']=includes
        env['INCLUDES_PYEMBED']=includes
    else:
        conf.to_log("Include path for Python extensions ""(found via distutils module): %r\n"%(dct['INCLUDEPY'],))
        env['INCLUDES_PYEXT']=[dct['INCLUDEPY']]
        env['INCLUDES_PYEMBED']=[dct['INCLUDEPY']]
    # CPython headers are known to break strict-aliasing assumptions.
    if env['CC_NAME']=='gcc':
        env.append_value('CFLAGS_PYEMBED',['-fno-strict-aliasing'])
        env.append_value('CFLAGS_PYEXT',['-fno-strict-aliasing'])
    if env['CXX_NAME']=='gcc':
        env.append_value('CXXFLAGS_PYEMBED',['-fno-strict-aliasing'])
        env.append_value('CXXFLAGS_PYEXT',['-fno-strict-aliasing'])
    if env.CC_NAME=="msvc":
        from distutils.msvccompiler import MSVCCompiler
        dist_compiler=MSVCCompiler()
        dist_compiler.initialize()
        env.append_value('CFLAGS_PYEXT',dist_compiler.compile_options)
        env.append_value('CXXFLAGS_PYEXT',dist_compiler.compile_options)
        env.append_value('LINKFLAGS_PYEXT',dist_compiler.ldflags_shared)
    try:
        # Final sanity check: compile and link a trivial embedded interpreter.
        conf.check(header_name='Python.h',define_name='HAVE_PYTHON_H',uselib='PYEMBED',fragment=FRAG,errmsg=':-(')
    except conf.errors.ConfigurationError:
        # Fall back on the flags reported by python-config directly.
        xx=conf.env.CXX_NAME and'cxx'or'c'
        conf.check_cfg(msg='Asking python-config for the flags (pyembed)',path=conf.env.PYTHON_CONFIG,package='',uselib_store='PYEMBED',args=['--cflags','--libs','--ldflags'])
        conf.check(header_name='Python.h',define_name='HAVE_PYTHON_H',msg='Getting pyembed flags from python-config',fragment=FRAG,errmsg='Could not build a python embedded interpreter',features='%s %sprogram pyembed'%(xx,xx))
        conf.check_cfg(msg='Asking python-config for the flags (pyext)',path=conf.env.PYTHON_CONFIG,package='',uselib_store='PYEXT',args=['--cflags','--libs','--ldflags'])
        conf.check(header_name='Python.h',define_name='HAVE_PYTHON_H',msg='Getting pyext flags from python-config',features='%s %sshlib pyext'%(xx,xx),fragment=FRAG,errmsg='Could not build python extensions')
@conf
def check_python_version(conf,minver=None):
    """
    Check that the interpreter in ``conf.env['PYTHON']`` is recent enough and
    record version-dependent installation paths in the environment.

    :param minver: minimum required version tuple, e.g. ``(2,6)``; ``None``
        accepts any version
    :raises: ``conf.fatal`` when no interpreter is configured or it is too old

    Side effects on ``conf.env``: PYTHON_VERSION, PYTHONDIR, PYTHONARCHDIR
    (the last two also exported as defines when ``conf.define`` exists).
    """
    assert minver is None or isinstance(minver,tuple)
    pybin=conf.env['PYTHON']
    if not pybin:
        conf.fatal('could not find the python executable')
    # Print the five sys.version_info fields one per line so they can be
    # parsed back without depending on the platform's tuple repr.
    cmd=pybin+['-c','import sys\nfor x in sys.version_info: print(str(x))']
    Logs.debug('python: Running python command %r'%cmd)
    lines=conf.cmd_and_log(cmd).split()
    assert len(lines)==5,"found %i lines, expected 5: %r"%(len(lines),lines)
    pyver_tuple=(int(lines[0]),int(lines[1]),int(lines[2]),lines[3],int(lines[4]))
    result=(minver is None)or(pyver_tuple>=minver)
    if result:
        # Interpreter accepted: compute the module installation directories.
        pyver='.'.join([str(x)for x in pyver_tuple[:2]])
        conf.env['PYTHON_VERSION']=pyver
        if'PYTHONDIR'in conf.environ:
            # explicit override from the calling environment
            pydir=conf.environ['PYTHONDIR']
        else:
            if Utils.is_win32:
                (python_LIBDEST,pydir)=conf.get_python_variables(["get_config_var('LIBDEST') or ''","get_python_lib(standard_lib=0, prefix=%r) or ''"%conf.env['PREFIX']])
            else:
                python_LIBDEST=None
                (pydir,)=conf.get_python_variables(["get_python_lib(standard_lib=0, prefix=%r) or ''"%conf.env['PREFIX']])
            if python_LIBDEST is None:
                # fall back to $LIBDIR/pythonX.Y or $PREFIX/lib/pythonX.Y
                if conf.env['LIBDIR']:
                    python_LIBDEST=os.path.join(conf.env['LIBDIR'],"python"+pyver)
                else:
                    python_LIBDEST=os.path.join(conf.env['PREFIX'],"lib","python"+pyver)
        if'PYTHONARCHDIR'in conf.environ:
            pyarchdir=conf.environ['PYTHONARCHDIR']
        else:
            (pyarchdir,)=conf.get_python_variables(["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''"%conf.env['PREFIX']])
            if not pyarchdir:
                # platforms without a separate platform-specific directory
                pyarchdir=pydir
        if hasattr(conf,'define'):
            conf.define('PYTHONDIR',pydir)
            conf.define('PYTHONARCHDIR',pyarchdir)
        conf.env['PYTHONDIR']=pydir
        conf.env['PYTHONARCHDIR']=pyarchdir
    # NOTE(review): the colour argument below evaluates to 'GREEN'
    # unconditionally because ">= %s" % ... is always truthy; looks like an
    # upstream waf quirk -- confirm against the waf sources before changing.
    pyver_full='.'.join(map(str,pyver_tuple[:3]))
    if minver is None:
        conf.msg('Checking for python version',pyver_full)
    else:
        minver_str='.'.join(map(str,minver))
        conf.msg('Checking for python version',pyver_tuple,">= %s"%(minver_str,)and'GREEN'or'YELLOW')
    if not result:
        conf.fatal('The python version is too old, expecting %r'%(minver,))
PYTHON_MODULE_TEMPLATE='''
import %s as current_module
version = getattr(current_module, '__version__', None)
if version is not None:
print(str(version))
else:
print('unknown version')
'''
@conf
def check_python_module(conf,module_name,condition=''):
    """
    Check that *module_name* can be imported by the configured interpreter,
    optionally validating its version.

    :param module_name: name of the Python module to import
    :param condition: optional expression evaluated against the reported
        version, e.g. ``"ver >= num(0, 6)"``; ``ver`` is the detected version
        and ``num(...)`` builds a comparable version object
    :raises: ``conf.fatal`` when the module is missing or the condition fails
    """
    msg='Python module %s'%module_name
    if condition:
        msg='%s (%s)'%(msg,condition)
    conf.start_msg(msg)
    try:
        # Import in a child interpreter so a broken module cannot corrupt
        # the build process itself.
        ret=conf.cmd_and_log(conf.env['PYTHON']+['-c',PYTHON_MODULE_TEMPLATE%module_name])
    except Exception:
        conf.end_msg(False)
        conf.fatal('Could not find the python module %r'%module_name)
    ret=ret.strip()
    if condition:
        conf.end_msg(ret)
        if ret=='unknown version':
            conf.fatal('Could not check the %s version'%module_name)
        from distutils.version import LooseVersion
        def num(*k):
            # helper exposed to the condition: accepts num(0,6) or num('0.6')
            if isinstance(k[0],int):
                return LooseVersion('.'.join([str(x)for x in k]))
            else:
                return LooseVersion(k[0])
        d={'num':num,'ver':LooseVersion(ret)}
        # NOTE: the condition string comes from the project's wscript, i.e.
        # trusted build code, so eval() is acceptable here.
        ev=eval(condition,{},d)
        if not ev:
            conf.fatal('The %s version does not satisfy the requirements'%module_name)
    else:
        if ret=='unknown version':
            conf.end_msg(True)
        else:
            conf.end_msg(ret)
def configure(conf):
    """
    Detect the Python interpreter and seed the byte-compilation settings
    (PYC/PYO flags and the py_compile command line) in ``conf.env``.
    """
    try:
        conf.find_program('python',var='PYTHON')
    except conf.errors.ConfigurationError:
        # fall back to the interpreter running waf itself
        Logs.warn("could not find a python executable, setting to sys.executable '%s'"%sys.executable)
        conf.env.PYTHON=sys.executable
    if conf.env.PYTHON!=sys.executable:
        Logs.warn("python executable %r differs from system %r"%(conf.env.PYTHON,sys.executable))
    conf.env.PYTHON=conf.cmd_to_list(conf.env.PYTHON)
    v=conf.env
    # command used at install time to byte-compile .py files
    v['PYCMD']='"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
    v['PYFLAGS']=''
    v['PYFLAGS_OPT']='-O'
    # honour --nopyc/--nopyo; the default installs both .pyc and .pyo
    v['PYC']=getattr(Options.options,'pyc',1)
    v['PYO']=getattr(Options.options,'pyo',1)
def options(opt):
    """Register --nopyc/--nopyo flags (skip installing byte-compiled files)."""
    opt.add_option('--nopyc',action='store_false',default=1,help='Do not install bytecode compiled .pyc files (configuration) [Default:install]',dest='pyc')
    opt.add_option('--nopyo',action='store_false',default=1,help='Do not install optimised compiled .pyo files (configuration) [Default:install]',dest='pyo')
| gpl-2.0 |
weimingtom/python-for-android | python3-alpha/python3-src/Lib/test/test_hashlib.py | 47 | 14178 | # Test hashlib module
#
# $Id$
#
# Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
import array
import hashlib
import itertools
import sys
try:
import threading
except ImportError:
threading = None
import unittest
import warnings
from test import support
from test.support import _4G, precisionbigmemtest
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# Only debug builds of CPython expose sys.gettotalrefcount().
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
def hexstr(s):
    """Return the lowercase hexadecimal transcription of the bytes *s*."""
    assert isinstance(s, bytes), repr(s)
    digits = "0123456789abcdef"
    # two hex digits per byte: high nibble then low nibble
    return ''.join(digits[(byte >> 4) & 0xF] + digits[byte & 0xF]
                   for byte in s)
class HashLibTestCase(unittest.TestCase):
    # Every algorithm name hashlib.new() must accept, in both cases.
    supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
                             'sha224', 'SHA224', 'sha256', 'SHA256',
                             'sha384', 'SHA384', 'sha512', 'SHA512' )
    # On debug builds, warn when an optional C accelerator failed to build.
    _warn_on_extension_import = COMPILED_WITH_PYDEBUG
def _conditional_import_module(self, module_name):
"""Import a module and return a reference to it or None on failure."""
try:
exec('import '+module_name)
except ImportError as error:
if self._warn_on_extension_import:
warnings.warn('Did a C extension fail to compile? %s' % error)
return locals().get(module_name)
    def __init__(self, *args, **kwargs):
        # Build self.constructors_to_test: for each lowercase algorithm name,
        # the set of constructors (direct attribute, hashlib.new, and every
        # available C implementation) that must all agree on digests.
        algorithms = set()
        for algorithm in self.supported_hash_names:
            algorithms.add(algorithm.lower())
        self.constructors_to_test = {}
        for algorithm in algorithms:
            self.constructors_to_test[algorithm] = set()
        # For each algorithm, test the direct constructor and the use
        # of hashlib.new given the algorithm name.
        for algorithm, constructors in self.constructors_to_test.items():
            constructors.add(getattr(hashlib, algorithm))
            def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
                # _alg default captures the loop variable (avoids late binding)
                if data is None:
                    return hashlib.new(_alg)
                return hashlib.new(_alg, data)
            constructors.add(_test_algorithm_via_hashlib_new)
        _hashlib = self._conditional_import_module('_hashlib')
        if _hashlib:
            # These two algorithms should always be present when this module
            # is compiled. If not, something was compiled wrong.
            assert hasattr(_hashlib, 'openssl_md5')
            assert hasattr(_hashlib, 'openssl_sha1')
            for algorithm, constructors in self.constructors_to_test.items():
                constructor = getattr(_hashlib, 'openssl_'+algorithm, None)
                if constructor:
                    constructors.add(constructor)
        # Non-OpenSSL fallback extension modules, when they were built.
        _md5 = self._conditional_import_module('_md5')
        if _md5:
            self.constructors_to_test['md5'].add(_md5.md5)
        _sha1 = self._conditional_import_module('_sha1')
        if _sha1:
            self.constructors_to_test['sha1'].add(_sha1.sha1)
        _sha256 = self._conditional_import_module('_sha256')
        if _sha256:
            self.constructors_to_test['sha224'].add(_sha256.sha224)
            self.constructors_to_test['sha256'].add(_sha256.sha256)
        _sha512 = self._conditional_import_module('_sha512')
        if _sha512:
            self.constructors_to_test['sha384'].add(_sha512.sha384)
            self.constructors_to_test['sha512'].add(_sha512.sha512)
        super(HashLibTestCase, self).__init__(*args, **kwargs)
    def test_hash_array(self):
        # Constructors must accept any object exposing the buffer API.
        a = array.array("b", range(10))
        constructors = self.constructors_to_test.values()
        for cons in itertools.chain.from_iterable(constructors):
            c = cons(a)
            c.hexdigest()
    def test_algorithms_guaranteed(self):
        # algorithms_guaranteed must equal the lowercase names tested here.
        self.assertEqual(hashlib.algorithms_guaranteed,
                         set(_algo for _algo in self.supported_hash_names
                             if _algo.islower()))
    def test_algorithms_available(self):
        # Everything guaranteed must also be reported as available.
        self.assertTrue(set(hashlib.algorithms_guaranteed).
                            issubset(hashlib.algorithms_available))
def test_unknown_hash(self):
try:
hashlib.new('spam spam spam spam spam')
except ValueError:
pass
else:
self.assertTrue(0 == "hashlib didn't reject bogus hash name")
    def test_get_builtin_constructor(self):
        # The private helper must raise for unknown names and must not fall
        # back silently when the builtin C module is unavailable.
        get_builtin_constructor = hashlib.__dict__[
                '__get_builtin_constructor']
        self.assertRaises(ValueError, get_builtin_constructor, 'test')
        try:
            import _md5
        except ImportError:
            pass
        # This forces an ImportError for "import _md5" statements
        sys.modules['_md5'] = None
        try:
            self.assertRaises(ValueError, get_builtin_constructor, 'md5')
        finally:
            # restore the real module if it imported, else drop the blocker
            if '_md5' in locals():
                sys.modules['_md5'] = _md5
            else:
                del sys.modules['_md5']
    def test_hexdigest(self):
        # digest() must be bytes, and hexdigest() its hex transcription.
        for name in self.supported_hash_names:
            h = hashlib.new(name)
            assert isinstance(h.digest(), bytes), name
            self.assertEqual(hexstr(h.digest()), h.hexdigest())
    def test_large_update(self):
        aas = b'a' * 128
        bees = b'b' * 127
        cees = b'c' * 126
        # Incremental update()s must hash identically to a single update().
        for name in self.supported_hash_names:
            m1 = hashlib.new(name)
            m1.update(aas)
            m1.update(bees)
            m1.update(cees)
            m2 = hashlib.new(name)
            m2.update(aas + bees + cees)
            self.assertEqual(m1.digest(), m2.digest())
    def check(self, name, data, digest):
        """Assert every registered constructor for *name* hashes *data* to *digest*."""
        constructors = self.constructors_to_test[name]
        # 2 is for hashlib.name(...) and hashlib.new(name, ...)
        self.assertGreaterEqual(len(constructors), 2)
        for hash_object_constructor in constructors:
            computed = hash_object_constructor(data).hexdigest()
            self.assertEqual(
                    computed, digest,
                    "Hash algorithm %s constructed using %s returned hexdigest"
                    " %r for %d byte input data that should have hashed to %r."
                    % (name, hash_object_constructor,
                       computed, len(data), digest))
    def check_no_unicode(self, algorithm_name):
        # Unicode objects are not allowed as input.
        constructors = self.constructors_to_test[algorithm_name]
        for hash_object_constructor in constructors:
            self.assertRaises(TypeError, hash_object_constructor, 'spam')
    def test_no_unicode(self):
        # str (unicode) input must be rejected for every algorithm.
        self.check_no_unicode('md5')
        self.check_no_unicode('sha1')
        self.check_no_unicode('sha224')
        self.check_no_unicode('sha256')
        self.check_no_unicode('sha384')
        self.check_no_unicode('sha512')
    # MD5 test vectors from RFC 1321's appendix test suite.
    def test_case_md5_0(self):
        self.check('md5', b'', 'd41d8cd98f00b204e9800998ecf8427e')
    def test_case_md5_1(self):
        self.check('md5', b'abc', '900150983cd24fb0d6963f7d28e17f72')
    def test_case_md5_2(self):
        self.check('md5',
                   b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
                   'd174ab98d277d9f5a5611c2c9f419d9f')
    @precisionbigmemtest(size=_4G + 5, memuse=1)
    def test_case_md5_huge(self, size):
        # Crossing the 4 GiB boundary exercises 64-bit length handling.
        if size == _4G + 5:
            try:
                self.check('md5', b'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
            except OverflowError:
                pass # 32-bit arch
    @precisionbigmemtest(size=_4G - 1, memuse=1)
    def test_case_md5_uintmax(self, size):
        # Largest length representable in an unsigned 32-bit int.
        if size == _4G - 1:
            try:
                self.check('md5', b'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
            except OverflowError:
                pass # 32-bit arch
    # use the three examples from Federal Information Processing Standards
    # Publication 180-1, Secure Hash Standard, 1995 April 17
    # http://www.itl.nist.gov/div897/pubs/fip180-1.htm
    # (official NIST vectors; one group of methods per digest size below)
    def test_case_sha1_0(self):
        self.check('sha1', b"",
                   "da39a3ee5e6b4b0d3255bfef95601890afd80709")
    def test_case_sha1_1(self):
        self.check('sha1', b"abc",
                   "a9993e364706816aba3e25717850c26c9cd0d89d")
    def test_case_sha1_2(self):
        self.check('sha1',
                   b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "84983e441c3bd26ebaae4aa1f95129e5e54670f1")
    def test_case_sha1_3(self):
        self.check('sha1', b"a" * 1000000,
                   "34aa973cd4c4daa4f61eeb2bdbad27316534016f")
    # use the examples from Federal Information Processing Standards
    # Publication 180-2, Secure Hash Standard, 2002 August 1
    # http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
    def test_case_sha224_0(self):
        self.check('sha224', b"",
                   "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
    def test_case_sha224_1(self):
        self.check('sha224', b"abc",
                   "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
    def test_case_sha224_2(self):
        self.check('sha224',
                   b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
    def test_case_sha224_3(self):
        self.check('sha224', b"a" * 1000000,
                   "20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
    def test_case_sha256_0(self):
        self.check('sha256', b"",
                   "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    def test_case_sha256_1(self):
        self.check('sha256', b"abc",
                   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
    def test_case_sha256_2(self):
        self.check('sha256',
                   b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
    def test_case_sha256_3(self):
        self.check('sha256', b"a" * 1000000,
                   "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
    def test_case_sha384_0(self):
        self.check('sha384', b"",
                   "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
                   "274edebfe76f65fbd51ad2f14898b95b")
    def test_case_sha384_1(self):
        self.check('sha384', b"abc",
                   "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
                   "8086072ba1e7cc2358baeca134c825a7")
    def test_case_sha384_2(self):
        self.check('sha384',
                   b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
                   "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
                   "fcc7c71a557e2db966c3e9fa91746039")
    def test_case_sha384_3(self):
        self.check('sha384', b"a" * 1000000,
                   "9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
                   "07b8b3dc38ecc4ebae97ddd87f3d8985")
    def test_case_sha512_0(self):
        self.check('sha512', b"",
                   "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
                   "47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
    def test_case_sha512_1(self):
        self.check('sha512', b"abc",
                   "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
                   "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
    def test_case_sha512_2(self):
        self.check('sha512',
                   b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
                   "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
                   "501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
    def test_case_sha512_3(self):
        self.check('sha512', b"a" * 1000000,
                   "e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
                   "de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
    def test_gil(self):
        # Check things work fine with an input larger than the size required
        # for multithreaded operation (which is hardwired to 2048).
        gil_minsize = 2048
        m = hashlib.md5()
        m.update(b'1')
        m.update(b'#' * gil_minsize)
        m.update(b'1')
        self.assertEqual(m.hexdigest(), 'cb1e1a2cbc80be75e19935d621fb9b21')
        m = hashlib.md5(b'x' * gil_minsize)
        self.assertEqual(m.hexdigest(), 'cfb767f225d58469c5de3632a8803958')
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_threaded_hashing(self):
        # Updating the same hash object from several threads at once
        # using data chunk sizes containing the same byte sequences.
        #
        # If the internal locks are working to prevent multiple
        # updates on the same object from running at once, the resulting
        # hash will be the same as doing it single threaded upfront.
        hasher = hashlib.sha1()
        num_threads = 5
        smallest_data = b'swineflu'
        data = smallest_data*200000
        expected_hash = hashlib.sha1(data*num_threads).hexdigest()
        def hash_in_chunks(chunk_size, event):
            # worker: feed the shared hasher in chunk_size pieces, then signal
            index = 0
            while index < len(data):
                hasher.update(data[index:index+chunk_size])
                index += chunk_size
            event.set()
        events = []
        for threadnum in range(num_threads):
            # chunk sizes len(data)/10**k are all multiples of the repeated
            # unit, so any interleaving still hashes the same byte stream
            chunk_size = len(data) // (10**threadnum)
            assert chunk_size > 0
            assert chunk_size % len(smallest_data) == 0
            event = threading.Event()
            events.append(event)
            threading.Thread(target=hash_in_chunks,
                             args=(chunk_size, event)).start()
        for event in events:
            event.wait()
        self.assertEqual(expected_hash, hasher.hexdigest())
def test_main():
    """Entry point used by the regrtest driver."""
    support.run_unittest(HashLibTestCase)
if __name__ == "__main__":
    test_main()
| apache-2.0 |
tectronics/romcollectionbrowser | resources/lib/xbmc.py | 10 | 7106 | """
This is a dummy file for testing and debugging XBMC plugins from the
command line. The file contains definitions for the functions found
in xbmc, xbmcgui and xbmcplugin built in modules
"""
import os
# Module-level state for the fake XBMC logging/settings facilities.
_loglevel = 1                                  # mirrors the XBMC loglevel (-1 none, 0 normal, 1 debug)
_settings = {'external_filemanaging': 'true'}  # canned values served by getSetting()
_filename = 'dummy.log'                        # log file created in the current directory
_logentry = 0                                  # running entry counter; 0 selects truncate mode
def _write_to_file(msg):
    """Append *msg* to the dummy log file, numbering each entry.

    The very first entry truncates the file; later entries append.
    """
    global _filename
    global _logentry
    filepath = os.path.join( os.getcwd(), _filename )
    if _logentry == 0: mode = 'w'
    else: mode = 'a'
    fh = open(filepath, mode)
    fh.write( '%003d -- %s\n' % ( _logentry, msg ) )
    fh.close()
    _logentry += 1
# ------------------------------------------------------------------------------
# xbmc module functions and constants
# ------------------------------------------------------------------------------
LOGNONE = -1
LOGNORMAL = 0
LOGDEBUG = 1
# NOTE(review): LOGDEBUG is rebound to 'DEBUG' below, so the numeric value
# above is dead -- confirm which binding the callers expect.
LOGNOTICE = 'NOTICE'
LOGDEBUG = 'DEBUG'
LOGWARNING = 'WARNING'
LOGERROR = 'ERROR'
def log(msg, level=LOGNOTICE):
    """
    msg : string - text to output.
    level : [opt] integer - log level to output at. (default=LOGNOTICE)
    *Note, You can use the above as keywords for arguments and skip certain
           optional arguments. Once you use a keyword, all following
           arguments require the keyword.
    Text is written to the log for the following conditions.
    XBMC loglevel == -1
    NONE, nothing at all is logged)
    XBMC loglevel == 0
    NORMAL, shows LOGNOTICE, LOGERROR, LOGSEVERE and LOGFATAL
    XBMC loglevel == 1
    DEBUG, shows all
    example:
    xbmc.log(msg='This is a test string.', level=xbmc.LOGDEBUG)
    """
    global _loglevel
    if _loglevel == -1:
        # logging disabled entirely
        return
    elif _loglevel == 0:
        # normal level: only NOTICE messages are emitted
        if level == LOGNOTICE:
            msg = '%s: %s' % (level, msg)
            print msg
            _write_to_file(msg)
    else:
        # debug level: everything is emitted
        msg = '%s: %s' % (level, msg)
        print msg
        _write_to_file(msg)
log('%s::Logging initiated!' % __name__)
def translatePath(path):
    """Dummy xbmc.translatePath: map any path into the current directory.

    Only the final path component of *path* is kept; it is re-rooted at
    the current working directory.
    """
    tail = os.path.basename(path)
    return os.path.join(os.getcwd(), tail)
class Keyboard:
    """Console stand-in for xbmc.Keyboard, reading input via raw_input()."""
    def __init__(self, default='', heading=None, hidden=False):
        self.default = default
        if not heading:
            heading = 'Keyboard Input'
        self.heading = heading
        # NOTE(review): 'hidden' is stored but console input is never masked
        self.hidden = hidden
    def doModal(self):
        # prompt on the console instead of showing the on-screen keyboard
        self.text = raw_input('%s\n-->' % self.heading)
    def getText(self):
        return self.text
    def isConfirmed(self):
        # console input cannot be cancelled, so always confirmed
        return True
# ------------------------------------------------------------------------------
# xbmcplugin module functions and constants
# ------------------------------------------------------------------------------
_items_added = 0  # number of directory entries "added" through the stubs below
class ListItem:
    """Do-nothing stand-in for xbmcgui.ListItem."""
    def __init__(self, *args, **kwargs):
        pass
    def setProperty(self, *args, **kwargs):
        pass
def addDirectoryItem(handle, url, listitem, isFolder=False, totalItems=None):
    """
    Callback function to pass directory contents back to XBMC.
    Returns a bool for successful completion.
    handle : integer - handle the plugin was started with.
    url : string - url of the entry. would be plugin:// for another
    virtual directory
    listitem : ListItem - item to add.
    isFolder : [opt] bool - True=folder / False=not a folder(default).
    totalItems : [opt] integer - total number of items that will be passed.
    used for progressbar
    "\"
    *Note, You can use the above as keywords for arguments and skip certain
    optional arguments. Once you use a keyword, all following
    arguments require the keyword.
    example:
    hndl = int(sys.argv[1])
    url = 'F:\\\\Trailers\\\\300.mov'
    if not xbmcplugin.addDirectoryItem(hndl, url, listitem, False, 50):
    break
    """
    # The dummy implementation only counts items; nothing is displayed.
    global _items_added
    _items_added += 1
def addDirectoryItems(handle, items, totalItems=None):
    """
    Callback function to pass directory contents back to XBMC as a list.
    Returns a bool for successful completion.
    handle : integer - handle the plugin was started with.
    items : List - list of (url, listitem[, isFolder])
    as a tuple to add.
    totalItems : [opt] integer - total number of items that
    will be passed.(used for progressbar)
    *Note, You can use the above as keywords for arguments.
    Large lists benefit over using the standard addDirectoryItem()
    You may call this more than once to add items in chunks
    example:
    hndl = int(sys.argv[0])
    items = [(url, listitem, False,)]
    if not xbmcplugin.addDirectoryItems(hndl, items):
    raise Exception
    """
    # The dummy implementation only counts items; nothing is displayed.
    global _items_added
    _items_added += len(items)
def endOfDirectory(handle, succeeded=True, updateListing=False, cacheToDisk=True):
"""
Callback function to tell XBMC that the end of the directory listing in
a virtualPythonFolder module is reached
handle : integer - handle the plugin was started with.
succeeded : [opt] bool - True = script completed successfully
False = script did not.
updateListing : [opt] bool - True = this folder should update the
current listing
False = Folder is a subfolder.
cacheToDisc : [opt] bool - True = Folder will cache if
extended time
False = this folder will never
cache to disc.
*Note, You can use the above as keywords for arguments and skip certain
optional arguments. Once you use a keyword, all following
arguments require the keyword.
example:
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
"""
global _items_added
print '%s -> %d items added' % (__func)
def getSetting(id):
    """
    Returns the value of a setting as a string.
    id : string - id of the setting that the module needs to access.
    *Note, You can use the above as a keyword.
    example:
    apikey = xbmcplugin.getSetting('apikey')
    """
    global _settings
    # dict.has_key() is Python 2 only; an unknown id implicitly returns None.
    if _settings.has_key(id):
        return _settings[id]
class Settings:
    """Do-nothing stand-in for the XBMC add-on settings object."""
    def __init__(self, path):
        # the real object loads settings.xml from *path*; the stub ignores it
        pass
    def getSetting(self, name):
        # Bug fix: the original last line was corrupted by a fused dataset
        # license column ('return '' | gpl-2.0 |'), which is a syntax error.
        return ''
FlorianLudwig/odoo | addons/website_forum_doc/models/documentation.py | 368 | 3222 | # -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class Documentation(osv.Model):
    """Hierarchical table of contents for forum-based documentation."""
    _name = 'forum.documentation.toc'
    _description = 'Documentation ToC'
    _inherit = ['website.seo.metadata']
    _order = "parent_left"            # nested-set order for fast subtree reads
    _parent_order = "sequence, name"
    _parent_store = True              # maintain parent_left/parent_right
    def name_get(self, cr, uid, ids, context=None):
        """Return (id, display name) pairs as 'Parent / Child'."""
        if isinstance(ids, (list, tuple)) and not len(ids):
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name', 'parent_id'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['parent_id']:
                # parent_id reads as an (id, display_name) pair in the old API
                name = record['parent_id'][1]+' / '+name
            res.append((record['id'], name))
        return res
    # TODO master remove me
    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)
    _columns = {
        'sequence': fields.integer('Sequence'),
        'name': fields.char('Name', required=True, translate=True),
        'introduction': fields.html('Introduction', translate=True),
        'parent_id': fields.many2one('forum.documentation.toc', 'Parent Table Of Content', ondelete='cascade'),
        'child_ids': fields.one2many('forum.documentation.toc', 'parent_id', 'Children Table Of Content'),
        'parent_left': fields.integer('Left Parent', select=True),
        'parent_right': fields.integer('Right Parent', select=True),
        'post_ids': fields.one2many('forum.post', 'documentation_toc_id', 'Posts'),
        'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
    }
    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]
class DocumentationStage(osv.Model):
    """Workflow stage for documentation posts, ordered by sequence."""
    _name = 'forum.documentation.stage'
    _description = 'Post Stage'
    _order = 'sequence'
    _columns = {
        'sequence': fields.integer('Sequence'),
        'name': fields.char('Stage Name', required=True, translate=True),
    }
class Post(osv.Model):
    """Extend forum.post with documentation ToC/stage fields and kanban grouping."""
    _inherit = 'forum.post'
    _columns = {
        'documentation_toc_id': fields.many2one('forum.documentation.toc', 'Documentation ToC', ondelete='set null'),
        'documentation_stage_id': fields.many2one('forum.documentation.stage', 'Documentation Stage'),
        'color': fields.integer('Color Index')
    }
    def _get_default_stage_id(self, cr, uid, context=None):
        # first stage by _order ('sequence') becomes the default
        stage_ids = self.pool["forum.documentation.stage"].search(cr, uid, [], limit=1, context=context)
        return stage_ids and stage_ids[0] or False
    _defaults = {
        'documentation_stage_id': _get_default_stage_id,
    }
    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        # show every stage as a kanban column, even when it holds no posts
        stage_obj = self.pool.get('forum.documentation.stage')
        stage_ids = stage_obj.search(cr, uid, [], context=context)
        result = stage_obj.name_get(cr, uid, stage_ids, context=context)
        return result, {}
    _group_by_full = {
        'documentation_stage_id': _read_group_stage_ids,
    }
| agpl-3.0 |
chauhanhardik/populo | lms/djangoapps/courseware/tests/test_favicon.py | 125 | 1841 | from django.conf import settings
from django.core.urlresolvers import clear_url_caches, resolve
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from nose.plugins.attrib import attr
import sys
@attr('shard_1')
class FaviconTestCase(TestCase):
    """Check that /favicon.ico redirects to the configured static favicon."""
    def setUp(self):
        super(FaviconTestCase, self).setUp()
    def test_favicon_redirect(self):
        # default settings: redirect to the stock favicon path
        resp = self.client.get("/favicon.ico")
        self.assertEqual(resp.status_code, 301)
        self.assertRedirects(
            resp,
            "/static/images/favicon.ico",
            status_code=301, target_status_code=404  # @@@ how to avoid 404?
        )
    @override_settings(FAVICON_PATH="images/foo.ico")
    def test_favicon_redirect_with_favicon_path_setting(self):
        # for some reason I had to put this inline rather than just using
        # the UrlResetMixin
        urlconf = settings.ROOT_URLCONF
        if urlconf in sys.modules:
            # force the URLconf to pick up the overridden FAVICON_PATH
            reload(sys.modules[urlconf])
        clear_url_caches()
        resolve("/")
        resp = self.client.get("/favicon.ico")
        self.assertEqual(resp.status_code, 301)
        self.assertRedirects(
            resp,
            "/static/images/foo.ico",
            status_code=301, target_status_code=404  # @@@ how to avoid 404?
        )
    @patch.dict("django.conf.settings.FEATURES", {"USE_CUSTOM_THEME": True})
    @override_settings(THEME_NAME="bar")
    def test_favicon_redirect_with_theme(self):
        # NOTE(review): this asserts the same "foo.ico" target as the
        # previous test even though only the theme is overridden here --
        # confirm the expected redirect target for themed deployments.
        self.assertEqual(settings.FEATURES["USE_CUSTOM_THEME"], True)
        resp = self.client.get("/favicon.ico")
        self.assertEqual(resp.status_code, 301)
        self.assertRedirects(
            resp,
            "/static/images/foo.ico",
            status_code=301, target_status_code=404  # @@@ how to avoid 404?
        )
| agpl-3.0 |
giovannipro/map-the-glam | scraper/scrape/scrape_files-in-category.py | 1 | 4999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Get files in category
import os # get file path
import sys # reset file encoding
import webbrowser # open webpages
import urllib, json, io # read json
from urllib import urlopen # open file
import time # get unix code
import csv # read csv
reload(sys)
sys.setdefaultencoding("utf-8")  # Python 2 hack so UTF-8 titles write without explicit encode()
# -----------------------------------
# Utilities
folder = os.path.dirname(os.path.realpath(__file__))  # directory of this script
t = "\t"  # TSV field separator
n = "\n"  # row terminator
s = " "
def loop(x):
    """Print the integers 1..x (unused demo helper)."""
    index = 0
    while index < x:
        index += 1
        print(index)
def inloop(x):
    # NOTE(review): effectively dead code -- ``index = x`` makes the
    # ``index < x`` test always false, so the recursive branch never runs
    # and only x itself is printed. Confirm the intended behaviour.
    print (x)
    index = x
    if index < x:
        index -= 1
        inloop(index)
def clean_url(title):
    """Percent-encode a fixed set of characters for Commons API URLs.

    NOTE(review): several mappings look wrong and should be confirmed:
    'ä' maps to %e5 (which is 'å' in latin-1; 'ä' is %e4); replace_05
    replaces every plain 'u' (presumably 'ü' was intended); replace_11
    (' ') is defined but never used, while '…' is mapped to %20.
    """
    replace_01 = "?"
    replace_02 = "&"
    replace_03 = "ä"
    replace_04 = "ö"
    replace_05 = "u"
    replace_06 = "("
    replace_07 = ")"
    replace_08 = ","
    replace_09 = "-"
    replace_10 = "…"
    replace_11 = " "
    clean = title.replace(replace_01,"%3f") \
        .replace(replace_02,"%26") \
        .replace(replace_03,"%e5") \
        .replace(replace_04,"%f6") \
        .replace(replace_05,"%fc") \
        .replace(replace_06,"%28") \
        .replace(replace_07,"%29") \
        .replace(replace_08,"%2c") \
        .replace(replace_09,"%2d") \
        .replace(replace_10,"%20")
    return clean
# test = clean_url("?????")
# print test
# -----------------------------------
# Main variables
# Base URL for the categorymembers API query; the category name is appended.
commons_api = "https://commons.wikimedia.org/w/api.php?action=query&format=json&list=categorymembers&cmtitle=Category:"
limit = 500  # maximum page size the API allows for categorymembers
# -----------------------------------
# Script
def get_files(category,cont):
    """Fetch member titles of a Commons category into data/<category>.tsv.

    Recurses with the API continuation token until the listing is exhausted.
    NOTE(review): each recursion reopens the file with mode "w", so only the
    final page of results survives -- "a" was probably intended (the inline
    comment suggests the author considered it); confirm before changing.
    """
    counter = 0
    f_out = folder + "/data/" + str(category) + ".tsv"
    if cont == 0:
        # first page: no continuation parameter
        request = commons_api + category + "&cmlimit=" + str(limit)
    else:
        request = commons_api + category + "&cmlimit=" + str(limit) + "&cmcontinue=" + str(cont) # &rvcontinue=
    response = urlopen(request).read()
    data = json.loads(response)
    with open(f_out, "w") as f: # a: add, w+: override
        for x in data["query"]["categorymembers"]:
            try:
                title = x["title"];
                output = title + n
                counter+=1
                f.write(output)
                print(counter)
            except:
                pass
    # index = 0
    try:
        # follow the continuation token, guarding against repeats
        new_new_cont = data["continue"]["cmcontinue"]
        if (new_new_cont != 0 and new_new_cont != cont):
            get_files(category,new_new_cont)
            # index += 1
            # print(index)
        else:
            print("stop")
    except:
        # no "continue" key means the listing is complete
        pass
def check_files(f_name):
    """Validate titles from data/<f_name>.tsv against the Commons API.

    Writes deduplicated, existing 'File:' titles to *_out.tsv and anything
    malformed or missing to *_err.tsv. Timing of the whole run is printed.
    """
    start = time.time()
    func = "check-files"
    f_in = folder + "/data/" + f_name + ".tsv"
    f_out = folder + "/data/" + f_name + "-" + func + "_out.tsv"
    f_err = folder + "/data/" + f_name + "-" + func + "_err.tsv"
    with open(f_in, "r") as f1:
        with open(f_out, "w") as f2:
            with open(f_err, "w") as f3:
                tsvin = csv.reader(f1, delimiter = t)
                def remove_duplicates(values):
                    # preserve first-seen order while dropping repeats
                    output = []
                    seen = set()
                    for value in values:
                        if value not in seen:
                            output.append(value)
                            seen.add(value)
                    return output
                def if_exist(titles):
                    # keep titles the API resolves to a positive page id;
                    # negative ids (missing pages) and request errors go
                    # to the error file
                    output = []
                    ind = 0
                    for title in titles:
                        try:
                            request = "https://commons.wikimedia.org/w/api.php?action=query&format=json&prop=imageinfo&iiprop=timestamp&titles=" + title
                            response = urlopen(request).read()
                            data = json.loads(response)
                            for x in data["query"]["pages"]:
                                id_ = int(x)
                                if (id_ > 0):
                                    # output = f2.write(title + n)
                                    output.append(title)
                                    print str(ind) # + t + title
                                else:
                                    f3.write(title + n)
                                    print str(ind) + t + title # + "<<<< error 1"
                            ind += 1
                        except:
                            f3.write(title + n)
                            print str(ind) + t + title # + " <<<< error 2"
                            pass
                    return output
                check = "File:"
                replace_01 = "?"
                replace_02 = "&"
                replace_03 = "ä"
                replace_04 = "ö"
                replace_05 = "u"
                # NOTE(review): same dubious byte mappings as clean_url();
                # 'u' -> %fc in particular mangles ordinary titles.
                list_ = []
                for file in tsvin:
                    title = file[1].replace(replace_01,"%3f").replace(replace_02,"%26").replace(replace_03,"%e5").replace(replace_04,"%f6").replace(replace_05,"%fc")
                    if (check in title):
                        list_.append(title)
                    else:
                        f3.write(title)
                result_1 = remove_duplicates(list_)
                result_2 = if_exist(result_1)
                index = 0
                for final in result_2:
                    output = str(index) + t + final + n
                    f2.write(output)
                    index +=1
    end = time.time()
    running_time = end - start
    print (running_time)
"""
def test():
check = "File:"
replace_01 = "?"
replace_02 = "&"
list_ = ["erjnfdc", "File:33","daix?", "fe?cdiwsb?"]
for x in list_:
clean = x.replace(replace_01,"xxx")
print(clean)
"""
# -----------------------------------
# Launch scripts
# check_files("Media_contributed_by_the_ETH-Bibliothek")
# test()
# get_files("Media_contributed_by_the_ETH-Bibliothek",0) # Media_contributed_by_the_Swiss_National_Library Media_contributed_by_the_Swiss_Federal_Archives Media_contributed_by_the_Swiss_Federal_Archives Media_contributed_by_the_ETH-Bibliothek
# NOTE(review): the category below contains a literal space; the API usually
# expects underscores or URL encoding -- confirm this request succeeds.
get_files("GWToolset Batch Upload",0)
| gpl-3.0 |
alexrao/YouCompleteMe | third_party/ycmd/ycmd/completers/all/tests/identifier_completer_test.py | 34 | 4504 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from nose.tools import eq_
from ycmd.completers.all import identifier_completer as ic
from ycmd.request_wrap import RequestWrap
from ycmd.tests.test_utils import BuildRequest
def BuildRequestWrap( contents, column_num, line_num = 1 ):
  """Build a RequestWrap for a synthetic completion request over *contents*
  at the given (1-based) line and column."""
  request = BuildRequest( contents = contents,
                          line_num = line_num,
                          column_num = column_num )
  return RequestWrap( request )
# Tests for ic._GetCursorIdentifier: given buffer contents and a column, it
# returns the identifier the cursor sits on, walking forward to the next
# identifier when the cursor is on a non-identifier character.
def GetCursorIdentifier_StartOfLine_test():
  eq_( 'foo', ic._GetCursorIdentifier( BuildRequestWrap( 'foo', 1 ) ) )
  eq_( 'fooBar', ic._GetCursorIdentifier( BuildRequestWrap( 'fooBar', 1 ) ) )
def GetCursorIdentifier_EndOfLine_test():
  # Column 3 still resolves to 'foo'.
  eq_( 'foo', ic._GetCursorIdentifier( BuildRequestWrap( 'foo', 3 ) ) )
def GetCursorIdentifier_PastEndOfLine_test():
  # Past the end of the line there is nothing to pick up.
  eq_( '', ic._GetCursorIdentifier( BuildRequestWrap( 'foo', 11 ) ) )
def GetCursorIdentifier_NegativeColumn_test():
  # A nonsensical negative column still resolves to the line's identifier.
  eq_( 'foo', ic._GetCursorIdentifier( BuildRequestWrap( 'foo', -10 ) ) )
def GetCursorIdentifier_StartOfLine_StopsAtNonIdentifierChar_test():
  eq_( 'foo', ic._GetCursorIdentifier( BuildRequestWrap( 'foo(goo)', 1 ) ) )
def GetCursorIdentifier_AtNonIdentifier_test():
  # Cursor on the '(': the following identifier 'goo' is returned.
  eq_( 'goo', ic._GetCursorIdentifier( BuildRequestWrap( 'foo(goo)', 4 ) ) )
def GetCursorIdentifier_WalksForwardForIdentifier_test():
  eq_( 'foo', ic._GetCursorIdentifier( BuildRequestWrap( ' foo', 1 ) ) )
def GetCursorIdentifier_FindsNothingForward_test():
  # Only punctuation ahead of the cursor: empty result.
  eq_( '', ic._GetCursorIdentifier( BuildRequestWrap( 'foo ()***()', 5 ) ) )
def GetCursorIdentifier_SingleCharIdentifier_test():
  eq_( 'f', ic._GetCursorIdentifier( BuildRequestWrap( ' f ', 1 ) ) )
def GetCursorIdentifier_StartsInMiddleOfIdentifier_test():
  # The whole identifier is returned even when the cursor is inside it.
  eq_( 'foobar', ic._GetCursorIdentifier( BuildRequestWrap( 'foobar', 4 ) ) )
def GetCursorIdentifier_LineEmpty_test():
  eq_( '', ic._GetCursorIdentifier( BuildRequestWrap( '', 12 ) ) )
# Tests for ic._PreviousIdentifier: return the identifier completed just
# before the cursor. The first argument appears to be a minimum identifier
# length (see the IgnoreTooSmallIdent tests) — shorter candidates yield ''
# rather than triggering a further backwards search.
def PreviousIdentifier_Simple_test():
  eq_( 'foo', ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo', 4 ) ) )
def PreviousIdentifier_ColumnInMiddleStillWholeIdent_test():
  # Cursor inside the identifier still yields the whole word.
  eq_( 'foobar', ic._PreviousIdentifier( 2, BuildRequestWrap( 'foobar', 4 ) ) )
def PreviousIdentifier_IgnoreForwardIdents_test():
  # Identifiers after the cursor ('bar', 'zoo') are not considered.
  eq_( 'foo',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo bar zoo', 4 ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_test():
  # 'foo' is shorter than the requested minimum of 4 characters.
  eq_( '', ic._PreviousIdentifier( 4, BuildRequestWrap( 'foo', 4 ) ) )
def PreviousIdentifier_IgnoreTooSmallIdent_DontContinueLooking_test():
  # A too-short candidate does not make the search fall back to 'abcde'.
  eq_( '', ic._PreviousIdentifier( 4, BuildRequestWrap( 'abcde foo', 10 ) ) )
def PreviousIdentifier_WhitespaceAfterIdent_test():
  eq_( 'foo', ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo ', 6 ) ) )
def PreviousIdentifier_JunkAfterIdent_test():
  eq_( 'foo',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo ;;()** ', 13 ) ) )
def PreviousIdentifier_IdentInMiddleOfJunk_test():
  eq_( 'aa',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo ;;(aa)** ', 13 ) ) )
def PreviousIdentifier_IdentOnPreviousLine_test():
  # The backwards search crosses line boundaries.
  eq_( 'foo',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo\n ',
                                                    column_num = 3,
                                                    line_num = 2 ) ) )
  eq_( 'foo',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo\n',
                                                    column_num = 1,
                                                    line_num = 2 ) ) )
def PreviousIdentifier_IdentOnPreviousLine_JunkAfterIdent_test():
  eq_( 'foo',
       ic._PreviousIdentifier( 2, BuildRequestWrap( 'foo **;()\n ',
                                                    column_num = 3,
                                                    line_num = 2 ) ) )
| gpl-3.0 |
yongshengwang/hue | apps/oozie/src/oozie/decorators.py | 25 | 6936 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document, Document2
from oozie.models import Job, Node, Dataset
LOG = logging.getLogger(__name__)
def check_document_access_permission():
  """
  Decorator factory ensuring the user may *read* the document referenced by
  the request: a workflow id/uuid (GET or POST), a uuid, a coordinator id,
  a bundle id, or a 'doc_id' view kwarg. Raises PopupException when the
  referenced Document2 does not exist.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      lookup = {}
      try:
        workflow_ref = request.GET.get('workflow') or request.POST.get('workflow')
        if workflow_ref:
          # A numeric reference is a primary key; anything else is a uuid.
          lookup['id' if workflow_ref.isdigit() else 'uuid'] = workflow_ref
        elif request.GET.get('uuid'):
          lookup['uuid'] = request.GET.get('uuid')
        elif request.GET.get('coordinator'):
          lookup['id'] = request.GET.get('coordinator')
        elif request.GET.get('bundle'):
          lookup['id'] = request.GET.get('bundle')
        elif 'doc_id' in kwargs:
          lookup['id'] = kwargs['doc_id']

        if lookup:
          document = Document2.objects.get(**lookup)
          document.doc.get().can_read_or_exception(request.user)
      except Document2.DoesNotExist:
        raise PopupException(_('Job %(id)s does not exist') % {'id': lookup})

      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
def check_document_modify_permission():
  """
  Decorator factory ensuring the user may *modify* the job posted as JSON
  under the 'workflow', 'coordinator' or 'bundle' POST key (a dict carrying
  an 'id'). Raises PopupException when the referenced Document2 is missing;
  permission violations are raised by can_write_or_exception itself.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      doc_id = None
      # Try each supported POST key in turn until a non-empty payload is found.
      job = json.loads(request.POST.get('workflow', '{}'))
      if not job:
        job = json.loads(request.POST.get('coordinator', '{}'))
      if not job:
        # BUG FIX: this branch was an 'elif not job:' chained to the test
        # above, which made the 'bundle' fallback unreachable.
        job = json.loads(request.POST.get('bundle', '{}'))

      if job and job.get('id'):
        doc_id = job.get('id')
        try:
          doc2 = Document2.objects.get(id=job['id'])
          doc2.doc.get().can_write_or_exception(request.user)
        except Document2.DoesNotExist:
          # BUG FIX: the lookup raises Document2.DoesNotExist; the original
          # caught Document.DoesNotExist, so this handler never fired.
          raise PopupException(_('Job %(id)s does not exist') % {'id': doc_id})

      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
## Oozie v1 below
def check_job_access_permission(exception_class=PopupException):
  """
  Decorator factory guaranteeing read access to a workflow, coordinator or
  bundle. The matching view kwarg comes in as an id and goes out replaced
  by the full object; exception_class is raised on access violation.
  Must wrap views that take one of those kwargs.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      job_type = next((t for t in ('workflow', 'coordinator') if t in kwargs),
                      'bundle')
      job = kwargs.get(job_type)
      if job is not None:
        job = Job.objects.can_read_or_exception(request, job, exception_class=exception_class)
      # Re-bind unconditionally, exactly as the original flow did.
      kwargs[job_type] = job
      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
def check_job_edition_permission(authorize_get=False, exception_class=PopupException):
  """
  Decorator factory checking the user may modify a workflow, coordinator or
  bundle. Must appear below @check_job_access_permission (the kwarg is
  already the full object). GET requests skip the check when authorize_get
  is True.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      job_type = next((t for t in ('workflow', 'coordinator') if t in kwargs),
                      'bundle')
      job = kwargs.get(job_type)
      exempt = authorize_get and request.method == 'GET'
      if job is not None and not exempt:
        Job.objects.can_edit_or_exception(request, job, exception_class=exception_class)
      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
def check_action_access_permission(view_func):
  """
  Decorator ensuring read access to the workflow action named by the
  'action' kwarg: an id comes in, the full node object goes out in kwargs.
  """
  def decorate(request, *args, **kwargs):
    full_action = Node.objects.get(id=kwargs.get('action')).get_full_node()
    Job.objects.can_read_or_exception(request, full_action.workflow.id)
    kwargs['action'] = full_action
    return view_func(request, *args, **kwargs)
  return wraps(view_func)(decorate)
def check_action_edition_permission(view_func):
  """
  Decorator checking the user may modify a workflow action. Must appear
  below @check_action_access_permission, which has already turned the
  'action' kwarg into a full node object.
  """
  def decorate(request, *args, **kwargs):
    Job.objects.can_edit_or_exception(request, kwargs.get('action').workflow)
    return view_func(request, *args, **kwargs)
  return wraps(view_func)(decorate)
def check_dataset_access_permission(view_func):
  """
  Decorator ensuring read access to the dataset named by the 'dataset'
  kwarg: an id comes in, the full dataset object goes out in kwargs.
  """
  def decorate(request, *args, **kwargs):
    ds = kwargs.get('dataset')
    if ds is not None:
      ds = Dataset.objects.can_read_or_exception(request, ds)
    # Re-bind unconditionally, exactly as the original flow did.
    kwargs['dataset'] = ds
    return view_func(request, *args, **kwargs)
  return wraps(view_func)(decorate)
def check_dataset_edition_permission(authorize_get=False):
  """
  Decorator factory checking the user may modify a dataset — a dataset is
  editable iff its owning coordinator is. Must appear below
  @check_dataset_access_permission. GET requests skip the check when
  authorize_get is True.
  """
  def inner(view_func):
    def decorate(request, *args, **kwargs):
      ds = kwargs.get('dataset')
      exempt = authorize_get and request.method == 'GET'
      if ds is not None and not exempt:
        Job.objects.can_edit_or_exception(request, ds.coordinator)
      return view_func(request, *args, **kwargs)
    return wraps(view_func)(decorate)
  return inner
| apache-2.0 |
blaggacao/OpenUpgrade | addons/sale_crm/sale_crm.py | 320 | 1429 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class sale_order(osv.osv):
    # Extend the existing sale.order model with crm.tracking.mixin (CRM
    # tracking fields — see that mixin for details) and a CRM tags field.
    _name = "sale.order"
    _inherit = ['sale.order', 'crm.tracking.mixin']
    _columns = {
        # Many2many tags from crm.case.categ, limited by the domain to
        # categories of the order's section (presumably the sales team) or
        # section-less ones, and to categories bound to the crm.lead model.
        'categ_ids': fields.many2many('crm.case.categ', 'sale_order_category_rel', 'order_id', 'category_id', 'Tags', \
            domain="['|', ('section_id', '=', section_id), ('section_id', '=', False), ('object_id.model', '=', 'crm.lead')]", context="{'object_name': 'crm.lead'}")
    }
| agpl-3.0 |
maelstromio/maelstrom-py | maelstrom/cass_conn.py | 2 | 1462 | from cassandra.cluster import Cluster, NoHostAvailable
from cassandra.decoder import dict_factory
class CassandraConnection(object):
    """Owns a Cassandra Cluster plus a TransactionManager session pool for
    one keyspace.

    Construction is best-effort: when no host is reachable the error is
    reported on stdout and the object is left without cluster/session
    attributes, matching the historical behaviour.
    """

    # Class-level defaults; each instance overwrites them in __init__.
    ip = None
    kp = None

    def __init__(self, cass_ip, cass_kp):
        self.ip = cass_ip  # contact point(s) handed to Cluster
        self.kp = cass_kp  # keyspace the pooled sessions connect to
        try:
            self.cluster = Cluster(contact_points=self.ip,
                                   control_connection_timeout=10000.0)
            self.session = TransactionManager(self)
            self.session.row_factory = dict_factory
        except NoHostAvailable:
            # Deliberately swallowed: callers historically received a
            # partially initialised object plus this console message.
            # (print() call form works on both Python 2 and 3.)
            print("TimeoutError: Possibly a connection issue.")

    def destroy_cluster(self):
        """Shut down the cluster and all pooled sessions, clearing state."""
        # BUG FIX: the original assigned to *local* variables ip/kp here,
        # which silently did nothing; reset the instance attributes instead.
        self.ip = None
        self.kp = None
        self.cluster.shutdown()
        self.session.shutdown()
class TransactionManager:
    """Round-robin pool of Cassandra sessions for one connection.

    execute() rotates through the pool, firing each statement
    asynchronously on the least recently used session.
    """

    def __init__(self, connector):
        self.connector = connector
        # BUG FIX: `sessions` used to be a *class-level* list shared by every
        # TransactionManager, so sessions accumulated across connections and
        # shutdown() closed other instances' sessions. Keep it per-instance.
        self.sessions = []
        num_sessions = 1  # cpu_count() - 1 if cpu_count() > 1 else 1
        for _ in range(num_sessions):
            session = self.connector.cluster.connect(self.connector.kp)
            session.row_factory = dict_factory
            self.sessions.append(session)

    def execute(self, *args, **kwargs):
        """Run the statement asynchronously on the next pooled session and
        return the driver's ResponseFuture-like result."""
        # Rotate: take the least recently used session, fire the query,
        # then push the session to the back of the queue.
        current_session = self.sessions.pop(0)
        result = current_session.execute_async(*args, **kwargs)
        self.sessions.append(current_session)
        return result

    def shutdown(self):
        """Shut down every session owned by this pool."""
        for session in self.sessions:
            session.shutdown()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.