repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
hbldh/pymetawear | pymetawear/client.py | 1 | 5352 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main module for PyMetaWear
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2016-03-30
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from mbientlab.metawear import MetaWear, libmetawear
from pymetawear import add_stream_logger, modules
log = logging.getLogger(__name__)
# Human-readable board model names, indexed by the integer model code
# reported by the MetaWear firmware (index 0 = model could not be determined).
_model_names = [
    "Unknown",
    "MetaWear R",
    "MetaWear RG",
    "MetaWear RPro",
    "MetaWear C",
    "MetaWear CPro",
    "MetaEnvironment",
    "MetaDetector",
    "MetaHealth",
    "MetaTracker",
    "MetaMotion R",
    "MetaMotion C"
]
class MetaWearClient(object):
    """A MetaWear communication client.

    This client bridges the gap between the
    `MetaWear C++ API <https://github.com/mbientlab/Metawear-CppAPI>`_
    and a GATT communication package in Python. It provides Pythonic
    interface to using the MetaWear boards, allowing for rapid
    development and testing.

    :param str address: A Bluetooth MAC address to a MetaWear board.
    :param str device: Specifying which Bluetooth device to use. Defaults
        to ``hci0`` on Linux. Not available on Windows.
    :param bool connect: If client should connect automatically, or wait for
        explicit :py:meth:`~MetaWearClient.connect` call. Default is ``True``.
    :param bool debug: If printout of all sent and received
        data should be done.

    """

    def __init__(self, address, device='hci0', connect=True, debug=False):
        """Constructor."""
        self._address = address
        self._debug = debug
        self._connect = connect
        if self._debug:
            add_stream_logger()
        log.info("Creating MetaWearClient for {0}...".format(address))
        self.mw = MetaWear(self._address, hci_mac=device)
        log.debug("Client started for BLE device {0}...".format(self._address))
        # Sensor/actuator module wrappers; populated by
        # _initialize_modules() once a connection is established.
        self.accelerometer = None
        #self.gpio = None
        self.gyroscope = None
        self.magnetometer = None
        self.barometer = None
        self.ambient_light = None
        self.switch = None
        self.settings = None
        self.temperature = None
        self.haptic = None
        self.led = None
        self.sensorfusion = None
        if connect:
            self.connect()

    def __enter__(self):
        # Context-manager entry: connect lazily if the constructor was
        # told not to connect automatically.
        if not self._connect:
            self.connect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.disconnect()
        except Exception as e:
            # Report through the module logger instead of print, consistent
            # with the rest of this module. The exception is swallowed so
            # cleanup never masks an error raised inside the with-block.
            log.warning("Could not disconnect: {0}".format(e))

    @property
    def board(self):
        """The underlying MetaWear board handle."""
        return self.mw.board

    @property
    def firmware_version(self):
        """Firmware version string reported by the device."""
        return self.mw.info['firmware']

    @property
    def hardware_version(self):
        """Hardware revision string reported by the device."""
        return self.mw.info['hardware']

    @property
    def manufacturer(self):
        """Manufacturer string reported by the device."""
        return self.mw.info['manufacturer']

    @property
    def serial(self):
        """Serial number string reported by the device."""
        return self.mw.info['serial']

    @property
    def model(self):
        """Model string reported by the device."""
        return self.mw.info['model']

    def __str__(self):
        return "MetaWearClient, {0}: {1}".format(
            self._address, self.mw.info)

    def __repr__(self):
        return "<MetaWearClient, {0}>".format(self._address)

    def connect(self):
        """Connect this client to the MetaWear device."""
        self.mw.connect()
        self._initialize_modules()

    def disconnect(self):
        """Disconnects this client from the MetaWear device."""
        self.mw.disconnect()

    def _initialize_modules(self):
        """Instantiate the module wrappers for the connected board."""
        #self.gpio = modules.GpioModule(
        #    self.board,
        #    libmetawear.mbl_mw_metawearboard_lookup_module(
        #        self.board, modules.Modules.MBL_MW_MODULE_GPIO))
        self.accelerometer = modules.AccelerometerModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_ACCELEROMETER))
        self.gyroscope = modules.GyroscopeModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_GYRO))
        self.magnetometer = modules.MagnetometerModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_MAGNETOMETER))
        self.barometer = modules.BarometerModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_BAROMETER))
        self.ambient_light = modules.AmbientLightModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_AMBIENT_LIGHT))
        self.switch = modules.SwitchModule(self.board)
        self.settings = modules.SettingsModule(self.board)
        self.temperature = modules.TemperatureModule(self.board)
        self.haptic = modules.HapticModule(self.board)
        self.led = modules.LEDModule(self.board)
        self.sensorfusion = modules.SensorFusionModule(
            self.board,
            libmetawear.mbl_mw_metawearboard_lookup_module(
                self.board, modules.Modules.MBL_MW_MODULE_SENSOR_FUSION))
| mit |
bjmc/oauthlib | tests/oauth1/rfc5849/test_signatures.py | 10 | 15058 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from oauthlib.oauth1.rfc5849.signature import collect_parameters
from oauthlib.oauth1.rfc5849.signature import construct_base_string
from oauthlib.oauth1.rfc5849.signature import normalize_base_string_uri
from oauthlib.oauth1.rfc5849.signature import normalize_parameters
from oauthlib.oauth1.rfc5849.signature import sign_hmac_sha1, sign_hmac_sha1_with_client
from oauthlib.oauth1.rfc5849.signature import sign_rsa_sha1, sign_rsa_sha1_with_client
from oauthlib.oauth1.rfc5849.signature import sign_plaintext, sign_plaintext_with_client
from oauthlib.common import unicode_type
from ...unittest import TestCase
class SignatureTests(TestCase):
    """Tests for the RFC 5849 signature helpers.

    Control values (base strings and signatures) were generated with the
    openssl command line tool and are asserted against the pure-Python
    implementations.
    """

    class MockClient(dict):
        """Minimal client stand-in whose attributes are backed by the dict."""
        def __getattr__(self, name):
            return self[name]

        def __setattr__(self, name, value):
            self[name] = value

        def decode(self):
            # Decode all byte values to text in place; several signing
            # functions only accept unicode input.
            for k, v in self.items():
                self[k] = v.decode('utf-8')

    uri_query = "b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2=&a3=2+q"
    authorization_header = """OAuth realm="Example",
        oauth_consumer_key="9djdj82h48djs9d2",
        oauth_token="kkk9d7dh3k39sjv7",
        oauth_signature_method="HMAC-SHA1",
        oauth_timestamp="137131201",
        oauth_nonce="7d8f3e4a",
        oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D" """.strip()
    body = "content=This+is+being+the+body+of+things"
    http_method = b"post"
    base_string_url = quote("http://example.com/request?b5=%3D%253D"
                            "&a3=a&c%40=&a2=r%20b").encode('utf-8')
    normalized_encoded_request_parameters = quote(
        'OAuth realm="Example",'
        'oauth_consumer_key="9djdj82h48djs9d2",'
        'oauth_token="kkk9d7dh3k39sjv7",'
        'oauth_signature_method="HMAC-SHA1",'
        'oauth_timestamp="137131201",'
        'oauth_nonce="7d8f3e4a",'
        'oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"'
    ).encode('utf-8')
    client_secret = b"ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ"
    # Four literal spaces: they must percent-encode to the four %20s in
    # control_signature_plaintext below.
    resource_owner_secret = b"just-a-string    asdasd"
    control_base_string = (
        "POST&http%253A%2F%2Fexample.com%2Frequest%253F"
        "b5%253D%25253D%2525253D%2526"
        "a3%253D"
        "a%2526"
        "c%252540%253D%2526"
        "a2%253D"
        "r%252520b&"
        "OAuth%2520realm%253D%2522Example%2522%252C"
        "oauth_consumer_key%253D%25229djdj82h48djs9d2%2522%252C"
        "oauth_token%253D%2522kkk9d7dh3k39sjv7%2522%252C"
        "oauth_signature_method%253D%2522HMAC-SHA1%2522%252C"
        "oauth_timestamp%253D%2522137131201%2522%252C"
        "oauth_nonce%253D%25227d8f3e4a%2522%252C"
        "oauth_signature%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522")

    def setUp(self):
        self.client = self.MockClient(
            client_secret=self.client_secret,
            resource_owner_secret=self.resource_owner_secret
        )

    def test_construct_base_string(self):
        """
        Example text to be turned into a base string::

            POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
            Host: example.com
            Content-Type: application/x-www-form-urlencoded
            Authorization: OAuth realm="Example",
                oauth_consumer_key="9djdj82h48djs9d2",
                oauth_token="kkk9d7dh3k39sjv7",
                oauth_signature_method="HMAC-SHA1",
                oauth_timestamp="137131201",
                oauth_nonce="7d8f3e4a",
                oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"

        Sample Base string generated and tested against::

            POST&http%253A%2F%2Fexample.com%2Frequest%253Fb5%253D%25253D%252525
            3D%2526a3%253Da%2526c%252540%253D%2526a2%253Dr%252520b&OAuth%2520re
            alm%253D%2522Example%2522%252Coauth_consumer_key%253D%25229djdj82h4
            8djs9d2%2522%252Coauth_token%253D%2522kkk9d7dh3k39sjv7%2522%252Coau
            th_signature_method%253D%2522HMAC-SHA1%2522%252Coauth_timestamp%253
            D%2522137131201%2522%252Coauth_nonce%253D%25227d8f3e4a%2522%252Coau
            th_signature%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522
        """
        # All three inputs must be unicode; bytes anywhere raise ValueError.
        self.assertRaises(ValueError, construct_base_string,
                          self.http_method,
                          self.base_string_url,
                          self.normalized_encoded_request_parameters)
        self.assertRaises(ValueError, construct_base_string,
                          self.http_method.decode('utf-8'),
                          self.base_string_url,
                          self.normalized_encoded_request_parameters)
        self.assertRaises(ValueError, construct_base_string,
                          self.http_method.decode('utf-8'),
                          self.base_string_url.decode('utf-8'),
                          self.normalized_encoded_request_parameters)
        base_string = construct_base_string(
            self.http_method.decode('utf-8'),
            self.base_string_url.decode('utf-8'),
            self.normalized_encoded_request_parameters.decode('utf-8')
        )
        self.assertEqual(self.control_base_string, base_string)

    def test_normalize_base_string_uri(self):
        """
        Example text to be turned into a normalized base string uri::

            GET /?q=1 HTTP/1.1
            Host: www.example.net:8080

        Sample string generated::

            https://www.example.net:8080/
        """
        # test for unicode failure
        uri = b"www.example.com:8080"
        self.assertRaises(ValueError, normalize_base_string_uri, uri)
        # test for missing scheme
        uri = "www.example.com:8080"
        self.assertRaises(ValueError, normalize_base_string_uri, uri)
        # test a URI with the default port
        uri = "http://www.example.com:80/"
        self.assertEqual(normalize_base_string_uri(uri),
                         "http://www.example.com/")
        # test a URI missing a path
        uri = "http://www.example.com"
        self.assertEqual(normalize_base_string_uri(uri),
                         "http://www.example.com/")
        # test a relative URI
        # NOTE: uri and host are passed as separate arguments; the earlier
        # form passed a single tuple, which only ever exercised the unicode
        # type check instead of the relative-URI handling.
        uri = "/a-host-relative-uri"
        host = "www.example.com"
        self.assertRaises(ValueError, normalize_base_string_uri, uri, host)
        # test overriding the URI's netloc with a host argument
        uri = "http://www.example.com/a-path"
        host = "alternatehost.example.com"
        self.assertEqual(normalize_base_string_uri(uri, host),
                         "http://alternatehost.example.com/a-path")

    def test_collect_parameters(self):
        """We check against parameters multiple times in case things change
        after more parameters are added.
        """
        self.assertEqual(collect_parameters(), [])
        # Check against uri_query
        parameters = collect_parameters(uri_query=self.uri_query)
        correct_parameters = [('b5', '=%3D'),
                              ('a3', 'a'),
                              ('c@', ''),
                              ('a2', 'r b'),
                              ('c2', ''),
                              ('a3', '2 q')]
        self.assertEqual(sorted(parameters), sorted(correct_parameters))
        headers = {'Authorization': self.authorization_header}
        # check against authorization header as well
        parameters = collect_parameters(
            uri_query=self.uri_query, headers=headers)
        parameters_with_realm = collect_parameters(
            uri_query=self.uri_query, headers=headers, with_realm=True)
        # Redo the checks against all the parameters. Duplicated code but
        # better safety
        correct_parameters += [
            ('oauth_nonce', '7d8f3e4a'),
            ('oauth_timestamp', '137131201'),
            ('oauth_consumer_key', '9djdj82h48djs9d2'),
            ('oauth_signature_method', 'HMAC-SHA1'),
            ('oauth_token', 'kkk9d7dh3k39sjv7')]
        correct_parameters_with_realm = (
            correct_parameters + [('realm', 'Example')])
        self.assertEqual(sorted(parameters), sorted(correct_parameters))
        self.assertEqual(sorted(parameters_with_realm),
                         sorted(correct_parameters_with_realm))
        # Add in the body.
        # TODO: Add more content for the body. Daniel Greenfeld 2012/03/12
        # Redo again the checks against all the parameters. Duplicated code
        # but better safety
        parameters = collect_parameters(
            uri_query=self.uri_query, body=self.body, headers=headers)
        correct_parameters += [
            ('content', 'This is being the body of things')]
        self.assertEqual(sorted(parameters), sorted(correct_parameters))

    def test_normalize_parameters(self):
        """ We copy some of the variables from the test method above."""
        headers = {'Authorization': self.authorization_header}
        parameters = collect_parameters(
            uri_query=self.uri_query, body=self.body, headers=headers)
        normalized = normalize_parameters(parameters)
        # Unicode everywhere and always
        self.assertIsInstance(normalized, unicode_type)
        # Lets see if things are in order
        # check to see that querystring keys come in alphanumeric order:
        querystring_keys = ['a2', 'a3', 'b5', 'content', 'oauth_consumer_key',
                            'oauth_nonce', 'oauth_signature_method',
                            'oauth_timestamp', 'oauth_token']
        index = -1  # start at -1 because the 'a2' key starts at index 0
        for key in querystring_keys:
            self.assertGreater(normalized.index(key), index)
            index = normalized.index(key)

    # Control signature created using openssl:
    # echo -n $(cat <message>) | openssl dgst -binary -hmac <key> | base64
    control_signature = "Uau4O9Kpd2k6rvh7UZN/RN+RG7Y="

    def test_sign_hmac_sha1(self):
        """Verifying HMAC-SHA1 signature against one created by OpenSSL."""
        # Byte secrets must be rejected; only unicode is accepted.
        self.assertRaises(ValueError, sign_hmac_sha1, self.control_base_string,
                          self.client_secret, self.resource_owner_secret)
        sign = sign_hmac_sha1(self.control_base_string,
                              self.client_secret.decode('utf-8'),
                              self.resource_owner_secret.decode('utf-8'))
        self.assertEqual(len(sign), 28)
        self.assertEqual(sign, self.control_signature)

    def test_sign_hmac_sha1_with_client(self):
        self.assertRaises(ValueError,
                          sign_hmac_sha1_with_client,
                          self.control_base_string,
                          self.client)
        self.client.decode()
        sign = sign_hmac_sha1_with_client(
            self.control_base_string, self.client)
        self.assertEqual(len(sign), 28)
        self.assertEqual(sign, self.control_signature)

    control_base_string_rsa_sha1 = (
        b"POST&http%253A%2F%2Fexample.com%2Frequest%253Fb5%253D"
        b"%25253D%2525253D%2526a3%253Da%2526c%252540%253D%2526"
        b"a2%253Dr%252520b&OAuth%2520realm%253D%2522Example%25"
        b"22%252Coauth_consumer_key%253D%25229djdj82h48djs9d2"
        b"%2522%252Coauth_token%253D%2522kkk9d7dh3k39sjv7%2522"
        b"%252Coauth_signature_method%253D%2522HMAC-SHA1%2522"
        b"%252Coauth_timestamp%253D%2522137131201%2522%252Coau"
        b"th_nonce%253D%25227d8f3e4a%2522%252Coauth_signature"
        b"%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522")

    # Generated using: $ openssl genrsa -out <key>.pem 1024
    # PEM encoding requires the key to be concatenated with
    # linebreaks.
    rsa_private_key = b"""-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG
AlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVah
5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjTMO7IdrwIDAQAB
AoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9GxocdM1m30WyWRFMEz2nKJ8fR
p3vTD4w8yplTOhcoXdQZl0kRoaDzrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC
DY6xveQczE7qt7Vk7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i
rf6qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVawFt3UMhe
542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+XO/2xKp/d/ty1OIeovx
C60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZLeMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT
Suy30sKjLzqoGw1kR+wv7C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA
kmaMg2PNrjUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzVS
JzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lTLVduVgh4v5yLT
Ga6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPvdMlxqXA==
-----END RSA PRIVATE KEY-----
"""

    @property
    def control_signature_rsa_sha1(self):
        # Base string saved in "<message>". Signature obtained using:
        # $ echo -n $(cat <message>) | openssl dgst -sign <key>.pem | base64
        # where echo -n suppresses the last linebreak.
        return (
            "zV5g8ArdMuJuOXlH8XOqfLHS11XdthfIn4HReDm7jz8JmgLabHGmVBqCkCfZoFJPH"
            "dka7tLvCplK/jsV4FUOnftrJOQhbXguuBdi87/hmxOFKLmQYqqlEW7BdXmwKLZcki"
            "qq3qE5XziBgKSAFRkxJ4gmJAymvJBtrJYN9728rK8="
        )

    def test_sign_rsa_sha1(self):
        """Verify RSA-SHA1 signature against one created by OpenSSL."""
        base_string = self.control_base_string_rsa_sha1
        private_key = self.rsa_private_key
        control_signature = self.control_signature_rsa_sha1
        sign = sign_rsa_sha1(base_string, private_key)
        self.assertEqual(sign, control_signature)
        sign = sign_rsa_sha1(base_string.decode('utf-8'), private_key)
        self.assertEqual(sign, control_signature)

    def test_sign_rsa_sha1_with_client(self):
        base_string = self.control_base_string_rsa_sha1
        self.client.rsa_key = self.rsa_private_key
        control_signature = self.control_signature_rsa_sha1
        sign = sign_rsa_sha1_with_client(base_string, self.client)
        self.assertEqual(sign, control_signature)
        self.client.decode()  # decodes `rsa_private_key` from UTF-8
        sign = sign_rsa_sha1_with_client(base_string, self.client)
        self.assertEqual(sign, control_signature)

    control_signature_plaintext = (
        "ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ&"
        "just-a-string%20%20%20%20asdasd")

    def test_sign_plaintext(self):
        """Verify PLAINTEXT signature construction and input validation."""
        self.assertRaises(ValueError, sign_plaintext, self.client_secret,
                          self.resource_owner_secret)
        sign = sign_plaintext(self.client_secret.decode('utf-8'),
                              self.resource_owner_secret.decode('utf-8'))
        self.assertEqual(sign, self.control_signature_plaintext)

    def test_sign_plaintext_with_client(self):
        self.assertRaises(ValueError, sign_plaintext_with_client,
                          None, self.client)
        self.client.decode()
        sign = sign_plaintext_with_client(None, self.client)
        self.assertEqual(sign, self.control_signature_plaintext)
| bsd-3-clause |
zerkrx/zerkbox | lib/youtube_dl/extractor/thesun.py | 45 | 1067 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .ooyala import OoyalaIE
class TheSunIE(InfoExtractor):
    """Extractor for thesun.co.uk article pages with embedded Ooyala videos."""
    _VALID_URL = r'https://(?:www\.)?thesun\.co\.uk/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/',
        'info_dict': {
            'id': '2261604',
            'title': 'md5:cba22f48bad9218b64d5bbe0e16afddf',
        },
        'playlist_count': 2,
    }

    def _real_extract(self, url):
        article_id = self._match_id(url)
        webpage = self._download_webpage(url, article_id)
        # One playlist entry per Ooyala content id embedded in the page.
        ooyala_ids = re.findall(
            r'<[^>]+\b(?:id\s*=\s*"thesun-ooyala-player-|data-content-id\s*=\s*")([^"]+)',
            webpage)
        entries = [OoyalaIE._build_url_result(ooyala_id)
                   for ooyala_id in ooyala_ids]
        return self.playlist_result(
            entries, article_id,
            self._og_search_title(webpage, fatal=False))
| gpl-3.0 |
rdmorganiser/rdmo | rdmo/options/migrations/0018_data_migration.py | 2 | 1112 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-13 11:22
from __future__ import unicode_literals
from django.db import migrations
def set_null_to_blank(queryset, fields):
    """Replace ``None`` with the empty string on the given fields.

    Every element in *queryset* is saved exactly once, whether or not
    any of its fields were changed.
    """
    for obj in queryset:
        for attr in fields:
            if getattr(obj, attr) is None:
                setattr(obj, attr, '')
        obj.save()
def run_data_migration(apps, schema_editor):
    """Blank out NULL character fields on OptionSet and Option rows.

    Historical models are obtained through ``apps`` rather than imported
    directly, as required for migrations.
    """
    option_set_model = apps.get_model('options', 'OptionSet')
    option_model = apps.get_model('options', 'Option')

    set_null_to_blank(
        option_set_model.objects.all(),
        ['uri', 'uri_prefix', 'key', 'comment'])

    set_null_to_blank(
        option_model.objects.all(),
        ['uri', 'uri_prefix', 'key', 'path', 'comment',
         'text_lang1', 'text_lang2', 'text_lang3', 'text_lang4',
         'text_lang5'])
class Migration(migrations.Migration):
    # Runs after 0017 so the text_lang* columns exist before the data
    # migration touches them.
    dependencies = [
        ('options', '0017_add_language_fields'),
    ]
    operations = [
        migrations.RunPython(run_data_migration),
    ]
| apache-2.0 |
xebitstudios/Kayak | examples/poisson_glm.py | 3 | 1224 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import kayak
# Problem size: N observations, D input features, P outputs.
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500
# Random inputs.
X = npr.randn(N,D)
true_W = npr.randn(D,P)
# Poisson GLM with log link: rate = exp(X W), counts ~ Poisson(rate).
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)
kyk_batcher = kayak.Batcher(batch_size, N)
# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)
# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)
# Weights.
W = 0.01*npr.randn(D,P)
kyk_W = kayak.Parameter(W)
# Linear layer.
kyk_activation = kayak.MatMult( kyk_inputs, kyk_W)
# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)
# Poisson negative log likelihood.
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets
# Sum the losses.
kyk_loss = kayak.MatSum( kyk_nll )
# Plain mini-batch stochastic gradient descent.
# NOTE: Python 2 syntax (xrange, print statement), matching this example.
for ii in xrange(100):
    for batch in kyk_batcher:
        loss = kyk_loss.value
        print loss, np.sum((kyk_W.value - true_W)**2)
        grad = kyk_loss.grad(kyk_W)
        kyk_W.value -= learn * grad
# Plot the true and inferred rate for a subset of data.
T_slice = slice(0,100)
kyk_inputs.value = X[T_slice,:]
plt.figure()
plt.plot(lam[T_slice], 'k')
plt.plot(kyk_lam.value, '--r')
plt.show()
xifle/greensc | tools/scons/scons-local-2.0.1/SCons/Tool/ipkg.py | 61 | 2498 | """SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls the ipkg-build. Its only argument should be the
packages fake_root.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import SCons.Builder
def generate(env):
    """Add Builders and construction variables for ipkg to an Environment."""
    try:
        bld = env['BUILDERS']['Ipkg']
    except KeyError:
        # Create the builder only once per Environment; the actual command
        # line is expanded from $IPKGCOM at build time.
        bld = SCons.Builder.Builder( action = '$IPKGCOM',
                                     suffix = '$IPKGSUFFIX',
                                     source_scanner = None,
                                     target_scanner = None)
        env['BUILDERS']['Ipkg'] = bld
    env['IPKG'] = 'ipkg-build'
    env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
    # Capture the invoking user's name and group once at tool setup; they
    # are stamped into the package through the -o/-g flags below.
    env['IPKGUSER'] = os.popen('id -un').read().strip()
    env['IPKGGROUP'] = os.popen('id -gn').read().strip()
    # NOTE(review): relies on SCons.Util being reachable through the SCons
    # package (only SCons.Builder is imported explicitly) -- confirm.
    env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
    env['IPKGSUFFIX'] = '.ipk'
def exists(env):
    """Return a truthy value if the ipkg-build executable can be found."""
    return env.Detect('ipkg-build')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
fuselock/odoo | addons/portal_project/project.py | 285 | 1809 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class portal_project(osv.Model):
    """Extend ``project.project`` visibility choices with a portal option."""
    _inherit = 'project.project'
    def _get_visibility_selection(self, cr, uid, context=None):
        """ Override to add portal option. """
        selection = super(portal_project, self)._get_visibility_selection(cr, uid, context=context)
        # Insert 'portal' right after the 'public' entry so the selection
        # order stays: public, portal, employees, followers.
        idx = [item[0] for item in selection].index('public')
        selection.insert((idx + 1), ('portal', _('Customer related project: visible through portal')))
        return selection
        # return [('public', 'All Users'),
        #         ('portal', 'Portal Users and Employees'),
        #         ('employees', 'Employees Only'),
        #         ('followers', 'Followers Only')]
| agpl-3.0 |
hmen89/odoo | addons/l10n_fr_hr_payroll/__openerp__.py | 374 | 2165 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
- All main contributions rules for French payslip, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
- Continue to integrate the contribution. Only the main contribution are
currently implemented
- Remake the report under webkit
- The payslip.line with appears_in_payslip = False should appears in the
payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MikkCZ/kitsune | kitsune/sumo/tests/test_form_fields.py | 5 | 4151 | from django.core.exceptions import ValidationError
from django.utils import translation
from nose.tools import eq_
from kitsune.sumo.form_fields import _format_decimal, TypedMultipleChoiceField
from kitsune.sumo.tests import TestCase
class TestFormatDecimal(TestCase):
    """Locale-aware formatting tests for the _format_decimal helper."""
    def test_default_locale(self):
        """Default locale just works"""
        num = _format_decimal(1234.567)
        eq_('1,234.567', num)
    def test_fr_locale(self):
        """French locale returns french format"""
        translation.activate('fr')
        num = _format_decimal(1234.567)
        # \xa0 is a non-breaking space (French thousands separator).
        eq_(u'1\xa0234,567', num)
    def test_xx_YY_locale(self):
        """Falls back to English-like formatting for unknown locales"""
        translation.activate('xx-YY')
        num = _format_decimal(1234.567)
        eq_('1,234.567', num)
    def test_fy_NL_locale(self):
        """Falls back to English for unknown babel locales"""
        # Note: if this starts to fail for no apparent reason, it's probably
        # because babel learned about fy-NL since this test was written.
        translation.activate('fy-NL')
        eq_('fy-nl', translation.get_language())
        num = _format_decimal(1234.567)
        eq_('1,234.567', num)
class TypedMultipleChoiceFieldTestCase(TestCase):
    """TypedMultipleChoiceField is just like MultipleChoiceField
    except, instead of validating, it coerces types."""

    def assertRaisesErrorWithMessage(self, error, message, callable, *args,
                                     **kwargs):
        """Assert *callable* raises *error* whose str() equals *message*."""
        self.assertRaises(error, callable, *args, **kwargs)
        try:
            callable(*args, **kwargs)
        except error as e:
            # 'except ... as ...' is valid on Python 2.6+ and required on
            # Python 3; the old comma form is Python-2-only syntax.
            eq_(message, str(e))

    def test_typedmultiplechoicefield_71(self):
        # Valid choices are coerced; invalid ones raise ValidationError.
        f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")],
                                     coerce=int)
        eq_([1], f.clean(['1']))
        self.assertRaisesErrorWithMessage(
            ValidationError,
            "[u'Select a valid choice. 2 is not one of the available choices."
            "']", f.clean, ['2'])

    def test_typedmultiplechoicefield_72(self):
        # Different coercion, same validation.
        f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")],
                                     coerce=float)
        eq_([1.0], f.clean(['1']))

    def test_typedmultiplechoicefield_73(self):
        # This can also cause weirdness: bool(-1) == True
        f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")],
                                     coerce=bool)
        eq_([True], f.clean(['-1']))

    def test_typedmultiplechoicefield_74(self):
        # Even more weirdness: if you have a valid choice but your coercion
        # function can't coerce, you'll still get a validation error.
        # Don't do this!
        f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')],
                                     coerce=int)
        self.assertRaisesErrorWithMessage(
            ValidationError,
            "[u'Select a valid choice. B is not one of the available choices."
            "']", f.clean, ['B'])
        # Required fields require values
        self.assertRaisesErrorWithMessage(
            ValidationError, "[u'This field is required.']", f.clean, [])

    def test_typedmultiplechoicefield_75(self):
        # Non-required fields aren't required
        f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")],
                                     coerce=int, required=False)
        eq_([], f.clean([]))

    def test_typedmultiplechoicefield_76(self):
        # If you want cleaning an empty value to return a different type,
        # tell the field
        f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")],
                                     coerce=int, required=False,
                                     empty_value=None)
        eq_(None, f.clean([]))

    def test_coerce_only(self):
        """No validation error raised in this case."""
        f = TypedMultipleChoiceField(choices=[(1, '+1')], coerce=int,
                                     coerce_only=True)
        eq_([], f.clean(['2']))
| bsd-3-clause |
HAZARDU5/sgdialer | src/nz/co/hazardmedia/sgdialer/config/Config.py | 1 | 4698 | __author__ = 'Michael Andrew michael@hazardmedia.co.nz'
class Config(object):
    """Static configuration for the stargate dialer.

    Bundles asset locations with the three lookup tables that relate a
    stargate address symbol number to its constellation name, its glyph
    in the display font, and the keyboard key used to dial it.
    """

    # Path from the src folder to the sound assets.
    sound_path = 'assets/sounds'

    # Index of the point of origin symbol.
    point_of_origin_code = 0

    # Path from the src folder to the stargate address data file.
    address_data_file_path = "data/addresses.xml"

    # Symbol number -> constellation name.  Glyphs are rendered with the
    # font "Stargate SG-1 Address Glyphs"; the trailing comments give the
    # font letter and keyboard key for each symbol.
    address_symbol_names = {
        "?": "Unknown",
        "1": "Earth",  # letter A, key 1
        # Point of origin (in our alternate universe the point of origin
        # acts as a terminator and can be the same on all DHDs).
        "2": "Crater",  # letter B, key 2
        "3": "Virgo",  # letter C, key 3
        "4": "Bootes",  # letter D, key 4
        "5": "Centaurus",  # letter E, key 5
        "6": "Libra",  # letter F, key 6
        "7": "Serpens Caput",  # letter G, key 7
        "8": "Norma",  # letter H, key 8
        "9": "Scorpio",  # letter I, key 9
        "10": "Cra",  # letter J, key 0
        "11": "Scutum",  # letter K, key - (hyphen)
        "12": "Sagittarius",  # letter L, key q
        "13": "Aquila",  # letter M, key w
        "14": "Mic",  # letter N, key e
        "15": "Capricorn",  # letter O, key r
        "16": "Pisces Austrinus",  # letter P, key t
        "17": "Equuleus",  # letter Q, key y
        "18": "Aquarius",  # letter R, key u
        "19": "Pegasus",  # letter S, key i
        "20": "Sculptor",  # letter T, key o
        "21": "Pisces",  # letter U, key p
        "22": "Andromeda",  # letter V, key a
        "23": "Triangulum",  # letter W, key s
        "24": "Aries",  # letter X, key d
        "25": "Perseus",  # letter Y, key f
        "26": "Cetus",  # letter Z, key g
        "27": "Taurus",  # letter a, key h
        "28": "Auriga",  # letter b, key j
        "29": "Eridanus",  # letter c, key k
        "30": "Orion",  # letter d, key l
        "31": "Canis Minor",  # letter e, key z
        "32": "Monoceros",  # letter f, key x
        "33": "Gemini",  # letter g, key c
        "34": "Hydra",  # letter h, key v
        "35": "Lynx",  # letter i, key b
        "36": "Cancer",  # letter j, key n
        "37": "Sextans",  # letter k, key m
        "38": "Leo Minor",  # letter l, key , (comma)
        "39": "Leo"  # letter m, key . (period)
    }

    # Symbol number -> character in the address glyph font
    # (symbols 1-26 map to A-Z, 27-39 to a-m).
    address_symbol_font_character = {
        "1": "A",
        "2": "B",
        "3": "C",
        "4": "D",
        "5": "E",
        "6": "F",
        "7": "G",
        "8": "H",
        "9": "I",
        "10": "J",
        "11": "K",
        "12": "L",
        "13": "M",
        "14": "N",
        "15": "O",
        "16": "P",
        "17": "Q",
        "18": "R",
        "19": "S",
        "20": "T",
        "21": "U",
        "22": "V",
        "23": "W",
        "24": "X",
        "25": "Y",
        "26": "Z",
        "27": "a",
        "28": "b",
        "29": "c",
        "30": "d",
        "31": "e",
        "32": "f",
        "33": "g",
        "34": "h",
        "35": "i",
        "36": "j",
        "37": "k",
        "38": "l",
        "39": "m"
    }

    # Keyboard key -> symbol number.  NOTE: this is the inverse direction
    # from the font table above (the original comment stated it backwards);
    # e.g. pressing "0" dials symbol "10", "-" dials "11", "q" dials "12".
    address_symbol_keyboard_character = {
        "1": "1",
        "2": "2",
        "3": "3",
        "4": "4",
        "5": "5",
        "6": "6",
        "7": "7",
        "8": "8",
        "9": "9",
        "0": "10",
        "-": "11",
        "q": "12",
        "w": "13",
        "e": "14",
        "r": "15",
        "t": "16",
        "y": "17",
        "u": "18",
        "i": "19",
        "o": "20",
        "p": "21",
        "a": "22",
        "s": "23",
        "d": "24",
        "f": "25",
        "g": "26",
        "h": "27",
        "j": "28",
        "k": "29",
        "l": "30",
        "z": "31",
        "x": "32",
        "c": "33",
        "v": "34",
        "b": "35",
        "n": "36",
        "m": "37",
        ",": "38",
        ".": "39"
    }
40223149/2015springcda | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/color.py | 603 | 4330 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Manipulate colors"""
try:
from colordict import THECOLORS
except ImportError:
#the colordict module isn't available
THECOLORS = {}
def Color(colorname):
    """pygame.color.Color(colorname) -> RGBA

    Look up the RGBA values for a color name.

    *colorname* may be a common English (X11) color name, or a "web"
    style hex string of the form 0xRRGGBB / #RRGGBB with an optional
    trailing AA alpha byte.  Name lookup ignores case and whitespace.

    Raises ValueError for unparsable hex strings and unknown names.
    """
    if colorname[:2] == '0x' or colorname[0] == '#':
        # Web-style hex string: strip the prefix, then parse byte pairs.
        hexpart = colorname[1:] if colorname[0] == '#' else colorname[2:]
        alpha = 255
        try:
            red = int(hexpart[0:2], 16)
            green = int(hexpart[2:4], 16)
            blue = int(hexpart[4:6], 16)
            if len(hexpart) > 6:
                alpha = int(hexpart[6:8], 16)
        except ValueError:
            raise ValueError("Illegal hex color")
        return red, green, blue, alpha
    # Plain English color name: normalize, then look it up.
    key = colorname.replace(' ', '').lower()
    try:
        return THECOLORS[key]
    except KeyError:
        raise ValueError("Illegal color name, " + key)
def _splitcolor(color, defaultalpha=255):
try:
second = int(color)
r = g = b = color
a = defaultalpha
except TypeError:
if len(color) == 4:
r, g, b, a = color
elif len(color) == 3:
r, g, b = color
a = defaultalpha
return r, g, b, a
def add(color1, color2):
    """pygame.color.add(color1, color2) -> RGBA

    Component-wise addition of two colors, clamped to a maximum of 255.
    A single numeric argument is broadcast across the RGB channels.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2)
    return (min(int(r1 + r2), 255),
            min(int(g1 + g2), 255),
            min(int(b1 + b2), 255),
            min(int(a1 + a2), 255))
def subtract(color1, color2):
    """pygame.color.subtract(color1, color2) -> RGBA

    Component-wise subtraction of two colors, clamped to a minimum of 0.
    A single numeric argument is broadcast across the RGB channels; the
    second color's missing alpha defaults to 0 so alpha is preserved.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2, 0)
    return (max(int(r1 - r2), 0),
            max(int(g1 - g2), 0),
            max(int(b1 - b2), 0),
            max(int(a1 - a2), 0))
def multiply(color1, color2):
    """pygame.color.multiply(color1, color2) -> RGBA

    Component-wise multiplication of two colors, scaled back into the
    0-255 range.  A single numeric argument is broadcast across the RGB
    channels.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2)
    # Fix: use floor division.  This file lives in a Python 3 (Brython)
    # tree, where the original Python-2-era '/' became true division and
    # silently produced float channel values.
    return (min(r1 * r2 // 255, 255),
            min(g1 * g2 // 255, 255),
            min(b1 * b2 // 255, 255),
            min(a1 * a2 // 255, 255))
| gpl-3.0 |
chainer/chainer | examples/memnn/train_memnn.py | 6 | 4288 | #!/usr/bin/env python
import argparse
import collections
import warnings
import chainer
from chainer.training import extensions
import numpy
import babi
import memnn
def train(train_data_path, test_data_path, args):
    """Train an end-to-end memory network on one bAbI task.

    train_data_path/test_data_path point at bAbI text files; *args*
    carries the parsed command-line options (see main()).  The trained
    model and its vocabulary are saved to args.model when set.
    """
    # Resolve and activate the compute device (CPU / CuPy GPU / ChainerX).
    device = chainer.get_device(args.device)
    device.use()
    # The vocabulary grows on demand: each unseen token receives the next
    # free integer id; id 0 is reserved for unknown words.
    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0
    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))
    # Convert stories into fixed-size memory arrays (max_memory sentences).
    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)
    # Sentence representation: bag-of-words or position encoding.
    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()
    model.to_device(device)
    opt.setup(model)
    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, opt, device=device)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        # Re-invokes network.fix_ignore_label() on every trigger; see the
        # memnn module for what it adjusts.
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
    # Persist the weights together with the vocabulary needed to reuse them.
    if args.model:
        memnn.save_model(args.model, model, vocab)
def main():
    """Parse command-line options and launch training."""
    parser = argparse.ArgumentParser(
        description='Chainer example: End-to-end memory networks')
    parser.add_argument('TRAIN_DATA',
                        help='Path to training data in bAbI dataset '
                        '(e.g. "qa1_single-supporting-fact_train.txt")')
    parser.add_argument('TEST_DATA',
                        help='Path to test data in bAbI dataset '
                        '(e.g. "qa1_single-supporting-fact_test.txt")')
    parser.add_argument('--model', '-m', default='model',
                        help='Model directory where it stores trained model')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini batch')
    parser.add_argument('--epoch', '-e', type=int, default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--unit', '-u', type=int, default=20,
                        help='Number of units')
    parser.add_argument('--hop', '-H', type=int, default=3,
                        help='Number of hops')
    parser.add_argument('--max-memory', type=int, default=50,
                        help='Maximum number of memory')
    parser.add_argument('--sentence-repr',
                        choices=['bow', 'pe'], default='bow',
                        help='Sentence representation. '
                        'Select from BoW ("bow") or position encoding ("pe")')
    # --gpu is kept for backwards compatibility; it writes into the same
    # 'device' destination as --device.
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    # FP16 can produce NaN in this example; warn rather than fail.
    if chainer.get_dtype() == numpy.float16:
        warnings.warn(
            'This example may cause NaN in FP16 mode.', RuntimeWarning)
    train(args.TRAIN_DATA, args.TEST_DATA, args)
if __name__ == '__main__':
main()
| mit |
atiti/crashcollector | crashcollector.py | 1 | 11865 | #!/usr/bin/env python
from wsgiref.simple_server import make_server
import os, os.path, sys
import cgi, urlparse
import random, string
import subprocess
import time
site_name = 'http://crashes.sirikata.com'  # public base URL used when building links returned to reporters
id_file = 'data/__id'  # counter file (relative to this script) holding the next report id
def str_to_bool(v):
    """Interpret the query-string value *v* as a boolean flag."""
    # Empty strings and explicit negatives are false; everything else true.
    return bool(v) and v.lower() not in ('no', 'false', '0')
def server_file(*args):
    """Join *args onto this script's directory, giving a server-local path."""
    return os.path.join(os.path.dirname(__file__), *args)
def load_file(path):
    """Read and return the raw bytes of *path* (relative to the server dir)."""
    with open(server_file(path), 'rb') as handle:
        return handle.read()
def save_file(path, data):
    """Write *data* to *path* (relative to the server dir), overwriting it."""
    with open(server_file(path), 'wb') as handle:
        handle.write(data)
def get_id():
    """Get a unique id to assign to a report.

    Ids are sequential integers starting at 0, persisted in the counter
    file named by id_file.
    """
    # NOTE(review): this read-modify-write of the counter file is not
    # atomic; concurrent submissions could receive the same id -- confirm
    # the WSGI server runs a single worker.
    if not os.path.exists(server_file(id_file)):
        id = 0
    else:
        id = int(load_file(id_file))
    save_file(id_file, str(id + 1))
    return id
def random_key():
    """Return a 32-character random key (hex digits) that authorizes edits."""
    random.seed()
    return ''.join(random.choice(string.hexdigits) for _ in range(32))
def id_exists(id):
    """Checks if we have state associated with an id."""
    # A report exists iff its data directory does.
    id_dir = server_file('data/' + str(id))
    return os.path.exists(id_dir)
def id_date(id):
    """Return the report's creation time as a UTC struct_time, taken from
    the ctime of its key file (written once at submission)."""
    return time.gmtime(os.path.getctime(server_file('data/' + str(id) + '/key')))
def id_lookup(id):
    """Lookup all the data associated with an id.

    Returns a dict with keys 'id', 'dumps' (list of *.dmp filenames),
    'desc', 'key', 'date', 'version' and 'githash', or None when the
    report does not exist.  Optional files default to ''.
    """
    if not id_exists(id): return None
    res = {'id': id}
    id_dir = server_file('data/' + str(id))
    # Every *.dmp file in the report directory counts as a minidump.
    res['dumps'] = [x for x in os.listdir(id_dir) if x.endswith('.dmp')]
    if os.path.exists(id_dir + '/desc'):
        res['desc'] = load_file(id_dir + '/desc')
    else:
        res['desc'] = ''
    res['key'] = load_file(id_dir + '/key')
    res['date'] = id_date(id)
    if os.path.exists(id_dir + '/version'):
        res['version'] = load_file(id_dir + '/version')
    else:
        res['version'] = ''
    if os.path.exists(id_dir + '/githash'):
        res['githash'] = load_file(id_dir + '/githash')
    else:
        res['githash'] = ''
    return res
def id_list():
    """Return the ids of all stored reports (unordered)."""
    entries = os.listdir(server_file('data/'))
    # Everything in data/ except the id counter file is a report directory.
    return [int(name) for name in entries if name != '__id']
def id_link(id, text=None):
    """Return an HTML anchor to the status page of report *id*."""
    label = text or str(id)
    return '<a href="/status/%s">%s</a>' % (id, label)
def id_dump_link(id, text=None):
    """Return an HTML anchor to the raw dump download of report *id*."""
    label = text or str(id)
    return '<a href="/data/%s/dump">%s</a>' % (id, label)
def id_get_stackwalk(id, dump, force=False):
    """Gets the stackwalk for a dump within a crash report.  The result
    is cached next to the dump, but can be forced to regenerate using
    force=True."""
    dump_file = server_file('data', str(id), dump)
    stackwalk_file = server_file('data', str(id), dump + '.stackwalk')
    if not force and os.path.exists(stackwalk_file):
        return load_file(stackwalk_file)
    # Run the bundled minidump_stackwalk binary against the symbol store.
    bt = subprocess.Popen([server_file('minidump_stackwalk'), dump_file, server_file('symbols')], stdout=subprocess.PIPE).communicate()[0]
    if bt:
        # Only cache non-empty output so failures are retried next time.
        save_file(stackwalk_file, bt)
        return bt
    return "Couldn't generate backtrace."
def decodable_revisions():
    """Gets the list of decodable revisions, i.e. those we should have symbols for."""
    # One revision hash per whitespace-separated token in the 'decodable' file.
    return load_file(server_file('decodable')).split()
def is_post(environ):
    """True when the WSGI request used the POST method."""
    method = environ['REQUEST_METHOD']
    return method.upper() == 'POST'
def decode_post(environ):
    """Parse a POST request body into a cgi.FieldStorage.

    Returns None for non-POST requests and for content types other than
    urlencoded forms or multipart uploads.
    """
    if environ['REQUEST_METHOD'].upper() != 'POST':
        return None
    content_type = environ.get('CONTENT_TYPE', 'application/x-www-form-urlencoded')
    if not (content_type.startswith('application/x-www-form-urlencoded') or
            content_type.startswith('multipart/form-data')):
        return None
    post_form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ, keep_blank_values=True)
    return post_form
def report(environ, start_response):
    """Accept a crash-report POST and reply with its edit URL.

    Expects form fields 'dump' (minidump bytes), 'dumpname', 'version'
    and 'githash'.  The plain-text response is the edit URL containing
    the per-report secret key.
    """
    status = '200 OK'
    headers = [('Content-type', 'text/plain')]
    start_response(status, headers)
    post_form = decode_post(environ)
    # Malformed submissions get an empty body rather than an error status.
    if not post_form or not 'dump' in post_form or not 'dumpname' in post_form:
        return ''
    dump = post_form['dump'].value
    dump_name = post_form['dumpname'].value
    # NOTE(review): 'version' and 'githash' are read unconditionally and
    # would raise KeyError if a client omits them -- confirm all reporters
    # always send both fields.
    siri_version = post_form['version'].value
    siri_githash = post_form['githash'].value
    id = get_id()
    # generate a magic key that allows editing
    magic = random_key()
    id_dir = 'data/' + str(id)
    os.makedirs(server_file(id_dir))
    save_file(id_dir + '/' + dump_name, dump)
    save_file(id_dir + '/' + 'key', magic)
    save_file(id_dir + '/' + 'version', siri_version)
    save_file(id_dir + '/' + 'githash', siri_githash)
    return [site_name + '/edit/' + str(id) + '?edit=' + magic]
def wrap_html(inner):
    """Wrap *inner* (a string or list of strings) in html/body tags.

    Returns a flat list of strings suitable as a WSGI response iterable.
    """
    body = inner if isinstance(inner, list) else [inner]
    return ['<html><body>'] + body + ['</body></html>']
def status_page(environ, start_response, id):
    """Render the read-only status page for report *id*.

    POSTing to the page (the "Reanalyze" button) forces the cached
    minidump stackwalk to be regenerated.
    """
    status = '200 OK'
    headers = [('Content-type', 'text/html')]
    start_response(status, headers)
    # Very simple check -- if we used POST (hit button rather than
    # normal GETting the URL) then reanalyze.
    reanalyze = is_post(environ)
    dump = id_lookup(id)
    if not dump:
        return wrap_html('Report' + str(id) + ' not found.')
    result = []
    result += ['<h3>Report ', id_link(id), '</h3>']
    result += ['<a href="/">home</a>', '<br>', '<br>']
    result += ['<form action="/status/', str(id), '" method="POST">']
    result += ['<input type="submit" value="Reanalyze"></input>']
    result += ['</form>']
    result += ['Date: ', time.strftime("%a, %d %b %Y %H:%M:%S", dump['date']), '<br>']
    if dump['version']:
        result += ['Version: ', dump['version'], '<br>']
    if dump['githash']:
        result += ['Git Revision: ', dump['githash'], '<br>']
    if dump['desc']:
        # NOTE(review): desc is user-supplied and rendered unescaped here --
        # XSS risk; consider escaping before interpolation.
        result += ['Description:<br><pre>', dump['desc'], '</pre>']
    for d in dump['dumps']:
        result += ['Dump: ', id_dump_link(id, d), '<br>']
        bt = id_get_stackwalk(id, d, force=reanalyze)
        result += ['<pre>', bt, '</pre>', '<br>']
    return wrap_html(result)
def edit_page(environ, start_response, id):
    """Render and process the edit form for report *id*.

    Editing requires the per-report secret key, supplied either as the
    'edit' query parameter (initial GET via the URL handed out at
    submission time) or as a hidden form field (POST).
    """
    status = '200 OK'
    headers = [('Content-type', 'text/html')]
    start_response(status, headers)
    dump = id_lookup(id)
    if not dump:
        return wrap_html('Report' + str(id) + ' not found.')
    # Case 1: form submission.  Verify the key, store the new description,
    # then fall through to re-render the edit page.
    # (Fix: removed a stray bare 'save_file' expression statement and a
    # debug print that wrote the secret edit key to the WSGI error log.)
    posted = False
    post_form = decode_post(environ)
    if post_form and 'edit' in post_form and 'desc' in post_form:
        if post_form['edit'].value == dump['key']:
            posted = True
            id_dir = server_file('data/' + str(id))
            save_file(id_dir + '/desc', post_form['desc'].value)
            dump['desc'] = post_form['desc'].value
    # Case 2: the key arrives in the query string so that the report URL
    # handed out at submission time grants edit access on a plain GET.
    query_string = environ.get("QUERY_STRING", "").strip()
    argset = urlparse.parse_qs(query_string, keep_blank_values=True, strict_parsing=False)
    if ('edit' in argset and argset['edit'][0] == dump['key']) or posted:
        result = []
        result += ['<h3>Edit Report ', id_link(id), '</h3>']
        if posted:
            result += ['<h4>Successfully updated!</h4>']
        result += ['<form action="/edit/', str(id), '" method="POST">']
        result += ['<h4>Description</h4>']
        # NOTE(review): desc is interpolated into HTML unescaped -- XSS risk.
        result += ['<textarea name="desc" rows=30 cols=80>', dump['desc'], '</textarea><br>']
        result += ['<input type="hidden" name="edit" value="' + dump['key'] + '"></input>']
        result += ['<input type="submit" value="Update"></input>']
        result += ['</form>']
        return wrap_html(result)
    # And if nothing else, they just aren't authorized.
    return wrap_html('You can\'t edit report ' + str(id) + '.')
def data_file(environ, start_response, data_path):
    """Serve a raw report file.  *data_path* has the form '<id>/<file_type>'.

    Currently only file_type 'dump' is supported, and only for reports
    that contain exactly one minidump.
    """
    # Format should be id/file_type
    parts = data_path.split('/')
    id, file_type = None, None
    if len(parts) == 2:
        try:
            id = int(parts[0])
        except ValueError:
            # Fix: a non-numeric id used to raise and produce a 500.
            id = None
        file_type = parts[1]
    # Fix: compare against None instead of truthiness -- report id 0 is a
    # valid id (get_id starts counting at 0) but 'not 0' is True, which
    # made report 0's dump permanently unreachable.
    if id is None or not file_type or file_type not in ['dump']:
        status = '404 Not Found'
        headers = [('Content-type', 'text/html')]
        start_response(status, headers)
        return wrap_html('Invalid URL')
    dump = id_lookup(id)
    if not dump:
        status = '200 OK'
        headers = [('Content-type', 'text/html')]
        start_response(status, headers)
        return wrap_html('Report' + str(id) + ' not found.')
    if file_type == 'dump':
        dump_files = dump['dumps']
        if len(dump_files) == 1:
            status = '200 OK'
            headers = [
                ('Content-type', 'application/octet-stream'),
                ('Content-Disposition', 'attachment; filename=' + dump_files[0])
            ]
            result = [load_file(server_file('data', str(id), dump_files[0]))]
        else:
            status = '404 Not Found'
            headers = [('Content-type', 'text/html')]
            result = wrap_html(['Download only works for single dump file.'])
    else:
        status = '404 Not Found'
        headers = [('Content-type', 'text/html')]
        start_response(status, headers)
        return wrap_html('Invalid URL')
    start_response(status, headers)
    return result
def listing(environ, start_response):
    """Render the front page: newest reports first, up to 'count' entries.

    Query parameters:
      count     -- maximum number of reports to show (default 100)
      decodable -- if truthy, only list reports whose git revision has
                   symbols available
    """
    status = '200 OK'
    headers = [('Content-type', 'text/html')]
    start_response(status, headers)
    query_string = environ.get("QUERY_STRING", "").strip()
    argset = urlparse.parse_qs(query_string, keep_blank_values=True, strict_parsing=False)
    # Fix: parse_qs yields strings; the count parameter used to be kept
    # as-is, so 'count -= 1' below raised TypeError whenever it was given.
    try:
        count = int(argset['count'][0])
    except (KeyError, IndexError, ValueError):
        count = 100
    if count <= 0:
        count = 100
    require_decodable = ('decodable' in argset and str_to_bool(argset['decodable'][0]))
    listing = id_list()
    if not listing:
        return wrap_html('No reports found.')
    # Newest ids first.
    listing.sort()
    listing.reverse()
    decodable = decodable_revisions()
    result = []
    result += ['<h3>Report List</h3>']
    if not require_decodable:
        result += ['<a href="/listing?decodable=true">(List with only stacks we can decode)</a>']
    result += ['<ul>']
    for id in listing:
        dump = id_lookup(id)
        if dump['githash'] not in decodable and require_decodable: continue
        # NOTE(review): decrement-before-append means at most count-1 entries
        # are shown; kept as-is for compatibility with the original paging.
        count -= 1
        if count == 0: break
        result += ['<li>', id_link(id), ' - ', time.strftime("%d %b %Y", dump['date'])]
        if dump['version']:
            result += [' Version: ', dump['version']]
        if dump['desc']:
            result += [': <pre>', dump['desc'], '</pre>']
        result += ['</li>']
    result += ['</ul>']
    return wrap_html(result)
def crashcollector_app(environ, start_response):
    """WSGI entry point: dispatch the redirected request URL to a handler."""
    url = environ.get("REDIRECT_TEMPLATEPAGE", environ.get("REDIRECT_URL", None))
    if url is None:
        # Fix: a request with neither redirect variable set used to crash
        # with AttributeError (None.startswith); serve the listing instead.
        url = '/'
    if url.startswith('/report'):
        return report(environ, start_response)
    elif url.startswith('/edit/'):
        url = url.replace('/edit/', '')
        return edit_page(environ, start_response, int(url))
    elif url.startswith('/status/'):
        url = url.replace('/status/', '')
        return status_page(environ, start_response, int(url))
    elif url.startswith('/data'):
        url = url.replace('/data/', '')
        return data_file(environ, start_response, url)
    elif url.startswith('/listing') or url == '/':
        return listing(environ, start_response)
    # Unknown URL: empty 200 response.
    status = '200 OK'
    headers = [('Content-type', 'text/plain')]
    start_response(status, headers)
    return ['']
# Name expected by WSGI containers such as mod_wsgi.
application = crashcollector_app

if __name__ == "__main__":
    # Standalone debug server on port 8000.
    httpd = make_server('', 8000, crashcollector_app)
    httpd.serve_forever()
| gpl-2.0 |
nachandr/cfme_tests | cfme/tests/candu/test_candu_manual.py | 2 | 11375 | """Manual tests"""
import pytest
from cfme import test_requirements
pytestmark = [
pytest.mark.manual,
]
@test_requirements.bottleneck
@pytest.mark.tier(2)
def test_bottleneck_datastore():
"""
Verify bottleneck events from host
Polarion:
assignee: gtalreja
casecomponent: Optimize
caseimportance: medium
initialEstimate: 3/4h
testtype: functional
"""
pass
@test_requirements.bottleneck
@pytest.mark.tier(2)
def test_bottleneck_provider():
"""
Verify bottleneck events from providers
Polarion:
assignee: gtalreja
casecomponent: Optimize
caseimportance: medium
initialEstimate: 3/4h
testtype: functional
"""
pass
@test_requirements.bottleneck
@pytest.mark.tier(2)
def test_bottleneck_host():
"""
Verify bottleneck events from host
Polarion:
assignee: gtalreja
casecomponent: Optimize
caseimportance: medium
initialEstimate: 3/4h
testtype: functional
"""
pass
@test_requirements.bottleneck
@pytest.mark.tier(2)
def test_bottleneck_cluster():
"""
Verify bottleneck events from cluster
Polarion:
assignee: gtalreja
casecomponent: Optimize
caseimportance: medium
initialEstimate: 3/4h
testtype: functional
"""
pass
@test_requirements.bottleneck
@pytest.mark.tier(1)
def test_bottleneck_summary_graph():
"""
test_bottleneck_summary_graph
Polarion:
assignee: gtalreja
casecomponent: Optimize
initialEstimate: 1/4h
testSteps:
1. setup c&u for provider and wait for bottleneck events
expectedResults:
1. summary graph is present and clickeble
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_crosshair_op_cluster_vsphere65():
"""
Requires:
C&U enabled Vsphere-65 appliance.
Steps:
1. Navigate to Clusters [Compute > infrastructure>Clusters]
2. Select any available cluster
3. Go for utilization graphs [Monitoring > Utilization]
4. Check data point on graphs ["CPU", "VM CPU state", "Memory", "Disk
I/O", "N/w I/O", "Host", "VMs"] using drilling operation on the data
points.
5. check "chart", "timeline" and "display" options working properly
or not.
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(2)
@test_requirements.c_and_u
def test_crosshair_op_azone_azure():
"""
Utilization Test
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(2)
@test_requirements.c_and_u
def test_crosshair_op_azone_ec2():
"""
test_crosshair_op_azone[ec2]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
testtype: functional
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_host_tagged_crosshair_op_vsphere65():
"""
Required C&U enabled application:1. Navigate to host C&U graphs
2. select Group by option with suitable VM tag
3. try to drill graph for VM
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/8h
startsin: 5.7
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_cluster_graph_by_vm_tag_vsphere65():
"""
test_cluster_graph_by_vm_tag[vsphere65]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_cluster_graph_by_host_tag_vsphere65():
"""
test_cluster_graph_by_host_tag[vsphere65]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_candu_graphs_vm_compare_host_vsphere65():
"""
test_candu_graphs_vm_compare_host[vsphere65]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/6h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_candu_graphs_vm_compare_cluster_vsphere65():
"""
test_candu_graphs_vm_compare_cluster[vsphere65]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/6h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_crosshair_op_vm_vsphere65():
"""
Requires:
C&U enabled Vsphere-65 appliance.
Steps:
1. Navigate to Datastores [Compute > infrastructure>VMs]
2. Select any available VM (cu24x7)
3. Go for utilization graphs [Monitoring > Utilization]
4. Check data point on graphs ["CPU", "VM CPU state", "Memory", "Disk
I/O", "N/w I/O"] using drilling operation on the data points.
5. check "chart" and "timeline" options working properly or not.
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(2)
@test_requirements.c_and_u
def test_crosshair_op_instance_azure():
"""
Utilization Test
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(2)
@test_requirements.c_and_u
def test_crosshair_op_instance_ec2():
"""
Verify that the following cross-hair operations can be performed on
each of the C&U graphs for an instance:
1.Chart
1.1 Hourly for this day and then back to daily
2.Timeline
2.1 Daily events on this VM
2.2 Hourly events for this VM
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
testtype: functional
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_crosshair_op_datastore_vsphere65():
"""
Requires:
C&U enabled Vsphere-65 appliance.
Steps:
1. Navigate to Datastores [Compute > infrastructure>Datastores]
2. Select any available datastore
3. Go for utilization graphs [Monitoring > Utilization]
4. Check data point on graphs ["Used Disk Space", "Hosts", "VMs"]
using drilling operation on the data points.
5. check "chart" and "display" option working properly or not.
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_group_by_tag_azone_azure():
"""
Utilization Test
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: low
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_azone_group_by_tag_ec2():
"""
test_azone_group_by_tag[ec2]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: low
initialEstimate: 1/12h
testtype: functional
"""
pass
@pytest.mark.tier(2)
@test_requirements.c_and_u
def test_candu_graphs_datastore_vsphere6():
"""
test_candu_graphs_datastore[vsphere6]
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: low
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_crosshair_op_host_vsphere65():
"""
Requires:
C&U enabled Vsphere-65 appliance.
Steps:
1. Navigate to Hosts [Compute > infrastructure>Hosts]
2. Select any available host
3. Go for utilization graphs [Monitoring > Utilization]
4. Check data point on graphs ["CPU", "VM CPU state", "Memory", "Disk
I/O", "N/w I/O", VMs] using drilling operation on the data points.
5. check "chart", "timeline" and "display" option working properly or
not.
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/12h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_candu_collection_tab():
"""
Test case to cover -
Bugzilla:
1393675
from BZ comments:
"for QE testing you can only replicate that in the UI by running a
refresh and immediately destroying the provider and hope that it runs
into this race conditions."
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/4h
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_cluster_tagged_crosshair_op_vsphere65():
"""
Required C&U enabled application:1. Navigate to cluster C&U graphs
2. select Group by option with suitable VM/Host tag
3. try to drill graph for VM/Host
Polarion:
assignee: gtalreja
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/8h
startsin: 5.7
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
def test_ec2_instance_memory_metrics():
"""
Bugzilla:
1684525
Polarion:
assignee: gtalreja
casecomponent: Cloud
initialEstimate: 1h
caseimportance: medium
casecomponent: CandU
testSteps:
1. Setup EC2 instance with CloudWatch Metrics Agent(https://docs.aws.amazon.com/
AmazonCloudWatch/latest/monitoring/metrics-collected-by-CloudWatch-agent.html)
2. Enable Memory metrics
3. Add EC2 Provider to CFME
4. Wait at least 30 minutes
5. Go to Compute -> Cloud -> Instances
6. Select instance with CloudWatch Metrics Agent
7. Go to its memory metrics.
expectedResults:
1.
2.
3.
4.
5.
6.
7. Memory metrics should have data.
"""
pass
@pytest.mark.tier(3)
@test_requirements.c_and_u
@pytest.mark.meta(coverage=[1776684])
def test_candu_verify_global_utilization_metrics():
"""
Bugzilla:
1776684
Polarion:
assignee: gtalreja
casecomponent: CandU
initialEstimate: 1h
caseimportance: medium
startsin: 5.10
testSteps:
1. Set up replication with 2 appliances global and remote
2. Enable C&U data on both appliances
3. Add provider on the remote, check data on the provider's dashboard
4. Add same provider on the global, check data on the provider's dashboard
5. Wait for at least 1 day for "Global Utilization" tab for providers
expectedResults:
1.
2.
3.
4.
5. Metrics should be same for Global and Remote regions.
"""
pass
| gpl-2.0 |
ptisserand/ansible | lib/ansible/modules/network/netscaler/netscaler_save_config.py | 67 | 4872 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_save_config
short_description: Save Netscaler configuration.
description:
- This module uncoditionally saves the configuration on the target netscaler node.
- This module does not support check mode.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
nsip:
description:
- The ip address of the netscaler appliance where the nitro API calls will be made.
- "The port can be specified with the colon (:). E.g. C(192.168.1.1:555)."
required: True
nitro_user:
description:
- The username with which to authenticate to the netscaler node.
required: True
nitro_pass:
description:
- The password with which to authenticate to the netscaler node.
required: True
nitro_protocol:
choices: [ 'http', 'https' ]
default: http
description:
- Which protocol to use when accessing the nitro API objects.
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
nitro_timeout:
description:
- Time in seconds until a timeout error is thrown when establishing a new session with Netscaler.
default: 310
requirements:
- nitro python sdk
'''
EXAMPLES = '''
---
- name: Save netscaler configuration
delegate_to: localhost
netscaler_save_config:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
- name: Setup server without saving configuration
delegate_to: localhost
notify: Save configuration
netscaler_server:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
save_config: no
name: server-1
ipaddress: 192.168.1.1
# Under playbook's handlers
- name: Save configuration
delegate_to: localhost
netscaler_save_config:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import get_nitro_client, log, loglines, netscaler_common_arguments
def main():
    """Entry point: log in to the target Netscaler and persist its running configuration."""
    # Start from the shared argument spec and drop the options that make no
    # sense for a module whose only job is saving the configuration.
    argument_spec = copy.deepcopy(netscaler_common_arguments)
    del argument_spec['state']
    del argument_spec['save_config']

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
    )

    # Result skeleton returned on both success and save failure.
    module_result = {
        'changed': False,
        'failed': False,
        'loglines': loglines,
    }

    # Abort immediately when the nitro SDK could not be imported.
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')

    client = get_nitro_client(module)

    try:
        client.login()
    except nitro_exception as e:
        module.fail_json(msg="nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message))
    except Exception as e:
        # requests may not be importable here, so its exception classes are
        # matched by their printed type name rather than with isinstance().
        type_name = str(type(e))
        if type_name == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif type_name == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))

    try:
        log('Saving configuration')
        client.save_config()
    except nitro_exception as e:
        module.fail_json(msg="nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message, **module_result)

    client.logout()
    module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
jnerin/ansible | lib/ansible/modules/files/archive.py | 58 | 17351 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
# Sponsored by Oomph, Inc. http://www.oomphinc.com
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: archive
version_added: '2.3'
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment: files
description:
- Packs an archive. It is the opposite of M(unarchive). By default, it assumes the compression source exists on the target. It will not copy the
source file from the local system to the target before archiving. Source files can be deleted after archival by specifying I(remove=True).
options:
path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
required: true
format:
description:
- The type of compression to use.
- Support for xz was added in version 2.5.
choices: [ bz2, gz, tar, xz, zip ]
default: gz
dest:
description:
- The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or
multiple paths in a list.
exclude_path:
version_added: '2.4'
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive
remove:
description:
- Remove any added source files and trees after adding to archive.
type: bool
default: 'no'
author:
- Ben Doherty (@bendoh)
notes:
- requires tarfile, zipfile, gzip and bzip2 packages on target host
- requires lzma or backports.lzma if using xz format
- can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives
'''
EXAMPLES = '''
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
archive:
path: /path/to/foo
dest: /path/to/foo.tgz
- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
archive:
path: /path/to/foo
remove: yes
- name: Create a zip archive of /path/to/foo
archive:
path: /path/to/foo
format: zip
- name: Create a bz2 archive of multiple files, rooted at /path
archive:
path:
- /path/to/foo
- /path/wong/foo
dest: /path/file.tar.bz2
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/bar
- /path/to/foo/baz
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/ba*
format: bz2
'''
RETURN = '''
state:
description:
The current state of the archived file.
If 'absent', then no source files were found and the archive does not exist.
If 'compress', then the source file is in the compressed state.
If 'archive', then the source file or paths are currently archived.
If 'incomplete', then an archive was created, but not all source paths were found.
type: string
returned: always
missing:
description: Any files that were missing from the source.
type: list
returned: success
archived:
description: Any files that were compressed or added to the archive.
type: list
returned: success
arcroot:
description: The archive root.
type: string
returned: always
expanded_paths:
description: The list of matching paths from paths argument.
type: list
returned: always
expanded_exclude_paths:
description: The list of matching exclude paths from the exclude_path argument.
type: list
returned: always
'''
import bz2
import filecmp
import glob
import gzip
import io
import os
import re
import shutil
import tarfile
import zipfile
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
# Detect lzma support: on Python 3 it ships in the standard library, while
# Python 2 only gets it through the third-party backports.lzma package.
# HAS_LZMA gates the 'xz' format in main().
if PY3:
    try:
        import lzma
        HAS_LZMA = True
    except ImportError:
        HAS_LZMA = False
else:
    try:
        from backports import lzma
        HAS_LZMA = True
    except ImportError:
        HAS_LZMA = False
def main():
    """Create a compressed file or archive from the configured paths.

    Computes the resulting ``state`` documented in RETURN: 'absent' (no
    source and no archive), 'compress' (single compressed file),
    'archive' (multi-file archive written) or 'incomplete' (archive
    written but some sources were missing), then exits through the
    module's JSON interface.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='list', required=True),
            format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
            dest=dict(type='path'),
            exclude_path=dict(type='list'),
            remove=dict(type='bool', default=False),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    check_mode = module.check_mode
    paths = params['path']
    dest = params['dest']
    exclude_paths = params['exclude_path']
    remove = params['remove']

    expanded_paths = []
    expanded_exclude_paths = []
    format = params['format']
    globby = False
    changed = False
    state = 'absent'

    # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
    archive = False
    successes = []

    # Fail early when the optional lzma dependency is needed but missing.
    if not HAS_LZMA and format == 'xz':
        module.fail_json(msg="lzma or backports.lzma is required when using xz format.")

    for path in paths:
        path = os.path.expanduser(os.path.expandvars(path))

        # Expand any glob characters. If found, add the expanded glob to the
        # list of expanded_paths, which might be empty.
        if ('*' in path or '?' in path):
            expanded_paths = expanded_paths + glob.glob(path)
            globby = True

        # If there are no glob characters the path is added to the expanded paths
        # whether the path exists or not
        else:
            expanded_paths.append(path)

    # Only attempt to expand the exclude paths if it exists
    if exclude_paths:
        for exclude_path in exclude_paths:
            exclude_path = os.path.expanduser(os.path.expandvars(exclude_path))

            # Expand any glob characters. If found, add the expanded glob to the
            # list of expanded_paths, which might be empty.
            if ('*' in exclude_path or '?' in exclude_path):
                expanded_exclude_paths = expanded_exclude_paths + glob.glob(exclude_path)

            # If there are no glob character the exclude path is added to the expanded
            # exclude paths whether the path exists or not.
            else:
                expanded_exclude_paths.append(exclude_path)

    if not expanded_paths:
        return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found')

    # If we actually matched multiple files or TRIED to, then
    # treat this as a multi-file archive
    archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1

    # Default created file name (for single-file archives) to
    # <file>.<format>
    if not dest and not archive:
        dest = '%s.%s' % (expanded_paths[0], format)

    # Force archives to specify 'dest'
    if archive and not dest:
        module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')

    archive_paths = []
    missing = []
    arcroot = ''

    for path in expanded_paths:
        # Use the longest common directory name among all the files
        # as the archive root path
        if arcroot == '':
            arcroot = os.path.dirname(path) + os.sep
        else:
            for i in range(len(arcroot)):
                if path[i] != arcroot[i]:
                    break

            if i < len(arcroot):
                arcroot = os.path.dirname(arcroot[0:i + 1])

            arcroot += os.sep

        # Don't allow archives to be created anywhere within paths to be removed
        if remove and os.path.isdir(path) and dest.startswith(path):
            module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True')

        if os.path.lexists(path) and path not in expanded_exclude_paths:
            archive_paths.append(path)
        else:
            missing.append(path)

    # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
    if len(missing) == len(expanded_paths) and dest and os.path.exists(dest):
        # Just check the filename to know if it's an archive or simple compressed file
        if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(dest), re.IGNORECASE):
            state = 'archive'
        else:
            state = 'compress'

    # Multiple files, or globbiness
    elif archive:
        if not archive_paths:
            # No source files were found, but the archive is there.
            if os.path.lexists(dest):
                state = 'archive'
        elif missing:
            # SOME source files were found, but not all of them
            state = 'incomplete'

        archive = None
        size = 0
        errors = []

        if os.path.lexists(dest):
            size = os.path.getsize(dest)

        if state != 'archive':
            if check_mode:
                changed = True

            else:
                try:
                    # Slightly more difficult (and less efficient!) compression using zipfile module
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)

                    # Easier compression using tarfile module
                    elif format == 'gz' or format == 'bz2':
                        arcfile = tarfile.open(dest, 'w|' + format)

                    # python3 tarfile module allows xz format but for python2 we have to create the tarfile
                    # in memory and then compress it with lzma.
                    elif format == 'xz':
                        arcfileIO = io.BytesIO()
                        arcfile = tarfile.open(fileobj=arcfileIO, mode='w')

                    # Or plain tar archiving
                    elif format == 'tar':
                        arcfile = tarfile.open(dest, 'w')

                    match_root = re.compile('^%s' % re.escape(arcroot))
                    for path in archive_paths:
                        if os.path.isdir(path):
                            # Recurse into directories
                            for dirpath, dirnames, filenames in os.walk(path, topdown=True):
                                if not dirpath.endswith(os.sep):
                                    dirpath += os.sep

                                for dirname in dirnames:
                                    fullpath = dirpath + dirname
                                    arcname = match_root.sub('', fullpath)

                                    try:
                                        if format == 'zip':
                                            arcfile.write(fullpath, arcname)
                                        else:
                                            arcfile.add(fullpath, arcname, recursive=False)

                                    except Exception as e:
                                        errors.append('%s: %s' % (fullpath, to_native(e)))

                                for filename in filenames:
                                    fullpath = dirpath + filename
                                    arcname = match_root.sub('', fullpath)

                                    # Never add the archive itself to the archive.
                                    if not filecmp.cmp(fullpath, dest):
                                        try:
                                            if format == 'zip':
                                                arcfile.write(fullpath, arcname)
                                            else:
                                                arcfile.add(fullpath, arcname, recursive=False)

                                            successes.append(fullpath)
                                        except Exception as e:
                                            # Report the file that actually failed,
                                            # not the directory being walked.
                                            errors.append('Adding %s: %s' % (fullpath, to_native(e)))
                        else:
                            if format == 'zip':
                                arcfile.write(path, match_root.sub('', path))
                            else:
                                arcfile.add(path, match_root.sub('', path), recursive=False)

                            successes.append(path)

                except Exception as e:
                    module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, to_native(e)),
                                     exception=format_exc())

                if arcfile:
                    arcfile.close()
                    state = 'archive'

                if format == 'xz':
                    # The tar was built in memory; compress it to dest now.
                    with lzma.open(dest, 'wb') as f:
                        f.write(arcfileIO.getvalue())
                    arcfileIO.close()

                if errors:
                    module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))

        if state in ['archive', 'incomplete'] and remove:
            # Collect human-readable failure messages separately: the previous
            # implementation referenced `e` after the except block, which is
            # unbound on Python 3 and only ever held the last error anyway.
            delete_errors = []
            for path in successes:
                try:
                    if os.path.isdir(path):
                        # Guard with check_mode just like the file branch below.
                        if not check_mode:
                            shutil.rmtree(path)
                    elif not check_mode:
                        os.remove(path)
                except OSError as e:
                    errors.append(path)
                    delete_errors.append('%s: %s' % (path, to_native(e)))

            if errors:
                module.fail_json(dest=dest, msg='Error deleting some source files: ' + '; '.join(delete_errors), files=errors)

        # Rudimentary check: If size changed then file changed. Not perfect, but easy.
        if not check_mode and os.path.getsize(dest) != size:
            changed = True

        if successes and state != 'incomplete':
            state = 'archive'

    # Simple, single-file compression
    else:
        path = expanded_paths[0]

        # No source or compressed file
        if not (os.path.exists(path) or os.path.lexists(dest)):
            state = 'absent'

        # if it already exists and the source file isn't there, consider this done
        elif not os.path.lexists(path) and os.path.lexists(dest):
            state = 'compress'

        else:
            if module.check_mode:
                if not os.path.exists(dest):
                    changed = True
            else:
                size = 0
                f_in = f_out = arcfile = None

                if os.path.lexists(dest):
                    size = os.path.getsize(dest)

                try:
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)
                        arcfile.write(path, path[len(arcroot):])
                        arcfile.close()
                        state = 'archive'  # because all zip files are archives
                    else:
                        f_in = open(path, 'rb')

                        if format == 'gz':
                            f_out = gzip.open(dest, 'wb')
                        elif format == 'bz2':
                            f_out = bz2.BZ2File(dest, 'wb')
                        elif format == 'xz':
                            f_out = lzma.LZMAFile(dest, 'wb')
                        else:
                            # Unreachable: argument_spec restricts the choices.
                            raise OSError("Invalid format")

                        shutil.copyfileobj(f_in, f_out)

                    successes.append(path)

                except OSError as e:
                    module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc())

                if arcfile:
                    arcfile.close()
                if f_in:
                    f_in.close()
                if f_out:
                    f_out.close()

                # Rudimentary check: If size changed then file changed. Not perfect, but easy.
                if os.path.getsize(dest) != size:
                    changed = True

                state = 'compress'

        if remove and not check_mode:
            try:
                os.remove(path)
            except OSError as e:
                module.fail_json(path=path, msg='Unable to remove source file: %s' % to_native(e), exception=format_exc())

    params['path'] = dest
    file_args = module.load_file_common_arguments(params)

    if not check_mode:
        changed = module.set_fs_attributes_if_different(file_args, changed)

    module.exit_json(archived=successes,
                     dest=dest,
                     changed=changed,
                     state=state,
                     arcroot=arcroot,
                     missing=missing,
                     expanded_paths=expanded_paths,
                     expanded_exclude_paths=expanded_exclude_paths)
if __name__ == '__main__':
main()
| gpl-3.0 |
timokoola/timoechobot | docutils/parsers/rst/languages/ja.py | 128 | 3863 | # -*- coding: utf-8 -*-
# $Id: ja.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Japanese-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Corrections to these translations are welcome!
# If there are mistakes, please tell us the correct translation.

# Japanese name to registered (in directives/__init__.py) directive name
# mapping for reStructuredText.
directives = {
    # language-dependent: fixed
    u'注目': 'attention',
    u'注意': 'caution',
    u'code (translation required)': 'code',
    u'危険': 'danger',
    u'エラー': 'error',
    u'ヒント': 'hint',
    u'重要': 'important',
    u'備考': 'note',
    u'通報': 'tip',
    u'警告': 'warning',
    u'戒告': 'admonition',
    u'サイドバー': 'sidebar',
    u'トピック': 'topic',
    u'ラインブロック': 'line-block',
    u'パーズドリテラル': 'parsed-literal',
    u'ルブリック': 'rubric',
    u'エピグラフ': 'epigraph',
    u'題言': 'epigraph',
    u'ハイライト': 'highlights',
    u'見所': 'highlights',
    u'プルクオート': 'pull-quote',
    u'合成': 'compound',
    u'コンテナー': 'container',
    u'容器': 'container',
    u'表': 'table',
    u'csv表': 'csv-table',
    u'リスト表': 'list-table',
    #u'質問': 'questions',
    #u'問答': 'questions',
    #u'faq': 'questions',
    u'math (translation required)': 'math',
    u'メタ': 'meta',
    #u'イメージマプ': 'imagemap',
    u'イメージ': 'image',
    u'画像': 'image',
    u'フィグア': 'figure',
    u'図版': 'figure',
    u'インクルード': 'include',
    u'含む': 'include',
    u'組み込み': 'include',
    u'生': 'raw',
    u'原': 'raw',
    u'換える': 'replace',
    u'取り換える': 'replace',
    u'掛け替える': 'replace',
    u'ユニコード': 'unicode',
    u'日付': 'date',
    u'クラス': 'class',
    u'ロール': 'role',
    u'役': 'role',
    u'ディフォルトロール': 'default-role',
    u'既定役': 'default-role',
    u'タイトル': 'title',
    u'題': 'title',  # alternative candidates: 題名, 件名
    u'目次': 'contents',
    u'節数': 'sectnum',
    u'ヘッダ': 'header',
    u'フッタ': 'footer',
    #u'脚注': 'footnotes',  # or 脚註?
    #u'サイテーション': 'citations',  # alternatives: 出典, 引証, 引用
    u'ターゲットノート': 'target-notes',  # alternatives: 的注, 的脚注
}
"""Japanese name to registered (in directives/__init__.py) directive name
mapping."""

roles = {
    # language-dependent: fixed
    u'略': 'abbreviation',
    u'頭字語': 'acronym',
    u'code (translation required)': 'code',
    u'インデックス': 'index',
    u'索引': 'index',
    u'添字': 'subscript',
    u'下付': 'subscript',
    u'下': 'subscript',
    u'上付': 'superscript',
    u'上': 'superscript',
    u'題参照': 'title-reference',
    u'pep参照': 'pep-reference',
    u'rfc参照': 'rfc-reference',
    u'強調': 'emphasis',
    u'強い': 'strong',
    u'リテラル': 'literal',
    u'整形済み': 'literal',
    u'math (translation required)': 'math',
    u'名付参照': 'named-reference',
    u'無名参照': 'anonymous-reference',
    u'脚注参照': 'footnote-reference',
    u'出典参照': 'citation-reference',
    u'代入参照': 'substitution-reference',
    u'的': 'target',
    u'uri参照': 'uri-reference',
    u'uri': 'uri-reference',
    u'url': 'uri-reference',
    u'生': 'raw',}
"""Mapping of Japanese role names to canonical role names for interpreted
text."""
| apache-2.0 |
McNetic/CouchPotatoServer-de | couchpotato/core/media/movie/providers/info/omdbapi.py | 1 | 5734 | import json
import re
import traceback
from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'OMDBAPI'
class OMDBAPI(MovieProvider):
    """Movie search/info provider backed by the public OMDb API (omdbapi.com)."""

    urls = {
        'search': 'http://www.omdbapi.com/?type=movie&%s',
        'info': 'http://www.omdbapi.com/?type=movie&i=%s',
    }

    # OMDb needs no throttling between requests.
    http_time_between_calls = 0

    def __init__(self):
        # Register this provider for the generic search/info events.
        addEvent('info.search', self.search)
        addEvent('movie.search', self.search)
        addEvent('movie.info', self.getInfo)

    def search(self, q, limit = 12):
        """Search OMDb for a free-form query.

        Returns a list containing at most one parsed movie dict (OMDb's
        title search returns a single best match).
        """
        if self.isSearchDisabled():
            return []

        # Try to split the query into a title and a release year.
        name_year = fireEvent('scanner.name_year', q, single = True)

        if not name_year or (name_year and not name_year.get('name')):
            name_year = {
                'name': q
            }

        cache_key = 'omdbapi.cache.%s' % q
        url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
        cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})

        if cached:
            result = self.parseMovie(cached)
            if result.get('titles') and len(result.get('titles')) > 0:
                log.info('Found: %s', result['titles'][0] + ' (' + str(result.get('year')) + ')')
                return [result]

        return []

    def getInfo(self, identifier = None, **kwargs):
        """Fetch info for a single movie by IMDb identifier (e.g. 'tt0133093')."""
        if not identifier:
            return {}

        if self.isInfoDisabled():
            return {}

        cache_key = 'omdbapi.cache.%s' % identifier
        cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})

        if cached:
            result = self.parseMovie(cached)
            if result.get('titles') and len(result.get('titles')) > 0:
                log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')')
                return result

        return {}

    def parseMovie(self, movie):
        """Normalize an OMDb JSON payload (raw string or dict) into a movie dict.

        Returns an empty dict for non-JSON input, error responses and
        non-movie results.
        """
        movie_data = {}
        try:

            try:
                if isinstance(movie, (str, unicode)):
                    movie = json.loads(movie)
            except ValueError:
                log.info('No proper json to decode')
                return movie_data

            if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
                return movie_data

            if movie.get('Type').lower() != 'movie':
                return movie_data

            # Strip out all 'N/A' placeholder values before mapping fields.
            tmp_movie = movie.copy()
            for key in tmp_movie:
                if tmp_movie.get(key).lower() == 'n/a':
                    del movie[key]

            year = tryInt(movie.get('Year', ''))

            movie_data = {
                'type': 'movie',
                'via_imdb': True,
                'titles': [movie.get('Title')] if movie.get('Title') else [],
                'original_title': movie.get('Title'),
                'images': {
                    'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
                },
                'rating': {
                    'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                    #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
                },
                'imdb': str(movie.get('imdbID', '')),
                'mpaa': str(movie.get('Rated', '')),
                'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
                'released': movie.get('Released'),
                'year': year if isinstance(year, int) else None,
                'plot': movie.get('Plot'),
                'genres': splitString(movie.get('Genre', '')),
                'directors': splitString(movie.get('Director', '')),
                'writers': splitString(movie.get('Writer', '')),
                'actors': splitString(movie.get('Actors', '')),
            }
            # Remove empty fields.
            movie_data = dict((k, v) for k, v in movie_data.items() if v)
        except Exception:
            log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

        return movie_data

    def runtimeToMinutes(self, runtime_str):
        """Convert an OMDb runtime string such as '2 h 15 min' or '136 min' to minutes."""
        runtime = 0

        regex = r'(\d*.?\d+).(h|hr|hrs|mins|min)+'
        matches = re.findall(regex, runtime_str)
        for match in matches:
            nr, size = match
            # Hour units contribute 60 minutes each.  Use a proper equality
            # test: the original compared with `is`, which only worked by
            # accident through CPython string interning.
            runtime += tryInt(nr) * (60 if str(size)[0] == 'h' else 1)

        return runtime
# Settings definition rendered in CouchPotato's "databases" settings tab.
config = [{
    'name': 'omdbapi',
    'order': 10,
    'groups': [
        {
            'tab': 'databases',
            'name': 'omdb',
            'label': 'Open Media Database',
            'description': 'Used for all calls to OMDB.',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': True,
                },
                {
                    'name': 'search_enabled',
                    'label': 'Enabled for movie search',
                    'type': 'bool',
                    'default': True,
                },
                {
                    'name': 'info_enabled',
                    'label': 'Enabled for movie info download',
                    'type': 'bool',
                    'default': True,
                },
            ],
        },
    ],
}]
| gpl-3.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/edx_management_commands/management_commands/management/commands/manage_group.py | 52 | 4853 | """
Management command `manage_group` is used to idempotently create Django groups
and set their permissions by name.
"""
from django.apps import apps
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.translation import gettext as _
class Command(BaseCommand):
    # pylint: disable=missing-docstring

    help = 'Creates the specified group, if it does not exist, and sets its permissions.'

    def add_arguments(self, parser):
        """Register CLI arguments: positional group name, --remove flag, -p permission specs."""
        parser.add_argument('group_name')
        parser.add_argument('--remove', dest='is_remove', action='store_true')
        parser.add_argument('-p', '--permissions', nargs='*', default=[])

    def _handle_remove(self, group_name):
        """Delete the named group if it exists; log either outcome (idempotent)."""
        try:
            Group.objects.get(name=group_name).delete()  # pylint: disable=no-member
            self.stderr.write(_('Removed group: "{}"').format(group_name))
        except Group.DoesNotExist:
            self.stderr.write(_('Did not find a group with name "{}" - skipping.').format(group_name))

    @transaction.atomic
    def handle(self, group_name, is_remove, permissions=None, *args, **options):
        """Create or update the group and sync its permission set (or remove it).

        Runs inside a single transaction so a partially applied
        permission change is rolled back on error.
        """
        if is_remove:
            self._handle_remove(group_name)
            return

        old_permissions = set()
        group, created = Group.objects.get_or_create(name=group_name)  # pylint: disable=no-member

        if created:
            try:
                # Needed for sqlite backend (i.e. in tests) because
                # name.max_length won't be enforced by the db.
                # See also http://www.sqlite.org/faq.html#q9
                group.full_clean()
            except ValidationError as exc:
                # give a more helpful error
                raise CommandError(
                    _(
                        'Invalid group name: "{group_name}". {messages}'
                    ).format(
                        group_name=group_name,
                        messages=exc.messages[0]
                    )
                )
            self.stderr.write(_('Created new group: "{}"').format(group_name))
        else:
            self.stderr.write(_('Found existing group: "{}"').format(group_name))
            # Only a pre-existing group can have permissions to diff against.
            old_permissions = set(group.permissions.all())

        new_permissions = self._resolve_permissions(permissions or set())
        add_permissions = new_permissions - old_permissions
        remove_permissions = old_permissions - new_permissions

        # NOTE(review): the "Adding" message lists `.name` while the
        # "Removing" message lists `.codename` — the two log lines are
        # inconsistent; confirm which was intended.
        self.stderr.write(
            _(
                'Adding {codenames} permissions to group "{group}"'
            ).format(
                codenames=[ap.name for ap in add_permissions],
                group=group.name
            )
        )
        self.stderr.write(
            _(
                'Removing {codenames} permissions from group "{group}"'
            ).format(
                codenames=[rp.codename for rp in remove_permissions],
                group=group.name
            )
        )

        # NOTE(review): direct assignment to a many-to-many manager was
        # removed in Django 2.0; newer versions need
        # group.permissions.set(new_permissions) — confirm the target
        # Django version before upgrading.
        group.permissions = new_permissions
        group.save()

    def _resolve_permissions(self, permissions):
        """Map 'app_label:model_name:codename' strings to Permission objects.

        Raises CommandError with a helpful message for malformed specs,
        unknown models, or unknown permission codenames.
        """
        new_permissions = set()
        for permission in permissions:
            try:
                app_label, model_name, codename = permission.split(':')
            except ValueError:
                # give a more helpful error
                raise CommandError(_(
                    'Invalid permission option: "{}". Please specify permissions '
                    'using the format: app_label:model_name:permission_codename.'
                ).format(permission))
            # this will raise a LookupError if it fails.
            try:
                model_class = apps.get_model(app_label, model_name)
            except LookupError as exc:
                raise CommandError(str(exc))

            content_type = ContentType.objects.get_for_model(model_class)
            try:
                new_permission = Permission.objects.get(  # pylint: disable=no-member
                    content_type=content_type,
                    codename=codename,
                )
            except Permission.DoesNotExist:
                # give a more helpful error
                raise CommandError(
                    _(
                        'Invalid permission codename: "{codename}". No such permission exists '
                        'for the model {module}.{model_name}.'
                    ).format(
                        codename=codename,
                        module=model_class.__module__,
                        model_name=model_class.__name__,
                    )
                )
            new_permissions.add(new_permission)
        return new_permissions
| agpl-3.0 |
mariusbaumann/pyload | module/plugins/hoster/FilesMailRu.py | 1 | 3878 | # -*- coding: utf-8 -*-
import re
from module.network.RequestFactory import getURL
from module.plugins.Hoster import Hoster
from module.plugins.Plugin import chunks
def getInfo(urls):
    """Probe files.mail.ru links and yield (name, size, status, url) tuples.

    Status codes: 1 = OFFLINE, 2 = OK, 3 = UNKNOWN.  Pages are fetched in
    chunks of 10 URLs; all results are yielded as a single list.
    """
    result = []
    for chunk in chunks(urls, 10):
        for url in chunk:
            html = getURL(url)
            if r'<div class="errorMessage mb10">' in html:
                # Page shows an explicit error box -> file is offline.
                result.append((url, 0, 1, url))
            elif r'Page cannot be displayed' in html:
                result.append((url, 0, 1, url))
            else:
                try:
                    url_pattern = r'<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
                    file_name = re.search(url_pattern, html).group(0).split(', event)">')[1].split('</a>')[0]
                    result.append((file_name, 0, 2, url))
                except (AttributeError, IndexError):
                    # re.search() found no download link (or the markup
                    # changed): status for this URL stays unknown.
                    pass
    # status 1=OFFLINE, 2=OK, 3=UNKNOWN
    # result.append((#name,#size,#status,#url))
    yield result
class FilesMailRu(Hoster):
    """Download hoster plugin for files.mail.ru."""

    __name__ = "FilesMailRu"
    __type__ = "hoster"
    __version__ = "0.31"

    __pattern__ = r'http://(?:www\.)?files\.mail\.ru/.+'

    __description__ = """Files.mail.ru hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("oZiRiz", "ich@oziriz.de")]


    def setup(self):
        # Without an account only one simultaneous download is allowed.
        if not self.account:
            self.multiDL = False


    def process(self, pyfile):
        """Resolve the real download link from the landing page and fetch the file."""
        self.html = self.load(pyfile.url)
        self.url_pattern = r'<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'

        # Mark the file as "offline" when the error marker is present on the page.
        if r'<div class="errorMessage mb10">' in self.html:
            self.offline()
        elif r'Page cannot be displayed' in self.html:
            self.offline()

        # The filename that will be showed in the list (e.g. test.part1.rar).
        pyfile.name = self.getFileName()

        # Anonymous downloads require a forced wait before the link is valid.
        if not self.account:
            self.prepare()
        self.download(self.getFileUrl())
        self.myPostProcess()


    def prepare(self):
        """You have to wait some seconds. Otherwise you will get a 40Byte HTML Page instead of the file you expected"""
        self.setWait(10)
        self.wait()
        return True


    def getFileUrl(self):
        """gives you the URL to the file. Extracted from the Files.mail.ru HTML-page stored in self.html"""
        return re.search(self.url_pattern, self.html).group(0).split('<a href="')[1].split('" onclick="return Act')[0]


    def getFileName(self):
        """gives you the Name for each file. Also extracted from the HTML-Page"""
        return re.search(self.url_pattern, self.html).group(0).split(', event)">')[1].split('</a>')[0]


    def myPostProcess(self):
        # searches the file for HTMl-Code. Sometimes the Redirect
        # doesn't work (maybe a curl Problem) and you get only a small
        # HTML file and the Download is marked as "finished"
        # then the download will be restarted. It's only bad for these
        # who want download a HTML-File (it's one in a million ;-) )
        #
        # The maximum UploadSize allowed on files.mail.ru at the moment is 100MB
        # so i set it to check every download because sometimes there are downloads
        # that contain the HTML-Text and 60MB ZEROs after that in a xyzfile.part1.rar file
        # (Loading 100MB in to ram is not an option)
        check = self.checkDownload({"html": "<meta name="}, read_size=50000)
        if check == "html":
            # Interpolate *after* translating so the catalog is looked up
            # with the untranslated template (the original formatted first,
            # which defeated the translation lookup).
            self.logInfo(_(
                "There was HTML Code in the Downloaded File (%s)...redirect error? The Download will be restarted.") %
                self.pyfile.name)
            self.retry()
| gpl-3.0 |
duqiao/django | tests/timezones/tests.py | 165 | 57662 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
# Decorator for tests that can only run when the optional pytz package is installed.
requires_pytz = skipIf(pytz is None, "this test requires pytz")


# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# timezones, which don't have Daylight Saving Time, so we can represent them
# easily with FixedOffset, and use them directly as tzinfo in the constructors.

# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.

# Offsets below are minutes east of UTC.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180)      # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420)      # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
    def test_naive_datetime(self):
        # With USE_TZ=False a naive datetime round-trips through the DB unchanged.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        # Backends with microsecond precision must preserve the microsecond part.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)
    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        # Backends without microsecond precision silently truncate to whole seconds.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))
    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_local_timezone(self):
        # An aware datetime comes back naive (USE_TZ=False) but in local time.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        # Same as above, with the microsecond part preserved by the backend.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
    # This combination actually never happens.
    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        # Timezone support without microsecond precision: microseconds truncated.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unspported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
    """
    Datetime round-trips through the database with USE_TZ=True.

    Django stores aware datetimes; naive inputs trigger a warning and are
    interpreted in the local time zone (Africa/Nairobi, i.e. EAT).
    """

    @requires_tz_support
    def test_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    def test_datetime_from_date(self):
        dt = datetime.date(2011, 9, 1)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))

    @requires_tz_support
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))

    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_auto_now_and_auto_now_add(self):
        now = timezone.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        # Bug fix: the original asserted `future > ts.updated` twice and
        # never checked `ts.created` against `future`.
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)

    def test_query_filter(self):
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    @requires_pytz
    def test_query_filter_with_pytz_timezones(self):
        tz = pytz.timezone('Europe/Paris')
        # NOTE(review): passing a pytz zone via tzinfo= yields the zone's
        # LMT offset rather than the current one; adequate here since the
        # same dt is used on both sides of the comparison — confirm intent.
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
        Event.objects.create(dt=dt)
        next = dt + datetime.timedelta(seconds=3)
        prev = dt - datetime.timedelta(seconds=3)
        self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
        self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
        self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)

    @requires_tz_support
    def test_query_filter_with_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        dt = dt.replace(tzinfo=None)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            # naive datetimes are interpreted in local time
            self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
            self.assertEqual(len(recorded), 3)
            for warning in recorded:
                msg = str(warning.message)
                self.assertTrue(msg.startswith("DateTimeField Event.dt "
                                               "received a naive datetime"))

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups_in_other_timezone(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            # These two dates fall in the same day in EAT, but in different days,
            # years and months in UTC.
            self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
            self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
            self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
            self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
            self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'year'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'month'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'day'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'hour'),
            [datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'minute'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'second'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes_in_other_timezone(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'year'),
                [datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'month'),
                [datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'day'),
                [datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'hour'),
                [datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'minute'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'second'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)

    def test_raw_sql(self):
        # Regression test for #17755
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    @skipUnlessDBFeature('supports_timezones')
    def test_cursor_execute_accepts_aware_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_timezones')
    def test_cursor_execute_accepts_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        utc_naive_dt = timezone.make_naive(dt, timezone.utc)
        with connection.cursor() as cursor:
            cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_timezones')
    def test_cursor_execute_returns_aware_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        with connection.cursor() as cursor:
            cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
            self.assertEqual(cursor.fetchall()[0][0], dt)

    @skipIfDBFeature('supports_timezones')
    def test_cursor_execute_returns_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        utc_naive_dt = timezone.make_naive(dt, timezone.utc)
        Event.objects.create(dt=dt)
        with connection.cursor() as cursor:
            cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
            self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)

    @requires_tz_support
    def test_filter_date_field_with_aware_datetime(self):
        # Regression test for #17742
        day = datetime.date(2011, 9, 1)
        AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())

    def test_null_datetime(self):
        # Regression test for #17294
        e = MaybeEvent.objects.create()
        self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
    """
    Test the TIME_ZONE database configuration parameter.

    Since this involves reading and writing to the same database through two
    connections, this is a TransactionTestCase.
    """

    available_apps = ['timezones']

    @classmethod
    def setUpClass(cls):
        # @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
        # outermost takes precedence. Handle skipping manually instead.
        if connection.features.supports_timezones:
            raise SkipTest("Database has feature(s) supports_timezones")
        if not connection.features.test_db_allows_multiple_connections:
            raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
        super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
        # Register a second alias that forces TIME_ZONE to Asia/Bangkok.
        connections.databases['tz'] = connections.databases['default'].copy()
        connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'

    @classmethod
    def tearDownClass(cls):
        connections['tz'].close()
        del connections['tz']
        del connections.databases['tz']
        super(ForcedTimeZoneDatabaseTests, cls).tearDownClass()

    def test_read_datetime(self):
        fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=fake_dt)
        # Reading through the 'tz' alias shifts the stored value by the
        # Bangkok/Nairobi offset difference.
        event = Event.objects.using('tz').get()
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        self.assertEqual(event.dt, dt)

    def test_write_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.using('tz').create(dt=dt)
        event = Event.objects.get()
        fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
        self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
    """
    A database TIME_ZONE option must be rejected when the backend itself
    already supports time zones.
    """

    def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
        connections.databases['tz'] = connections.databases['default'].copy()
        connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
        tz_conn = connections['tz']
        try:
            with self.assertRaises(ImproperlyConfigured):
                tz_conn.cursor()
        finally:
            connections['tz'].close()       # in case the test fails
            del connections['tz']
            del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
    """
    Serialization round-trips of naive and aware datetimes through the
    python, json, xml and yaml serializers.
    """

    # Backend-specific notes:
    # - JSON supports only milliseconds, microseconds will be truncated.
    # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
    #   returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
    # Tests are adapted to take these quirks into account.

    def assert_python_contains_datetime(self, objects, dt):
        self.assertEqual(objects[0]['fields']['dt'], dt)

    def assert_json_contains_datetime(self, json, dt):
        self.assertIn('"fields": {"dt": "%s"}' % dt, json)

    def assert_xml_contains_datetime(self, xml, dt):
        field = parseString(xml).getElementsByTagName('field')[0]
        self.assertXMLEqual(field.childNodes[0].wholeText, dt)

    def assert_yaml_contains_datetime(self, yaml, dt):
        # Depending on the yaml dumper, '!timestamp' might be absent
        six.assertRegex(self, yaml,
                        r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))

    def test_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
        obj = next(serializers.deserialize('json', data)).object
        # JSON keeps only millisecond precision
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_aware_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
        obj = next(serializers.deserialize('json', data)).object
        # JSON keeps only millisecond precision
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML loads aware datetimes as naive UTC (see notes above)
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@requires_pytz
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
    def test_localtime_filters_do_not_raise_exceptions(self):
        """
        Test the |localtime, |utc, and |timezone filters on bad inputs.
        """
        # The filters are deliberately forgiving: bad datetimes pass through
        # unchanged and bad zones render as the empty string — never raise.
        tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
        with self.settings(USE_TZ=True):
            # bad datetime value
            ctx = Context({'dt': None, 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "None|||")
            ctx = Context({'dt': 'not a date', 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "not a date|||")
            # bad timezone value
            tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
            self.assertEqual(tpl.render(ctx), "")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
            self.assertEqual(tpl.render(ctx), "")
    @requires_tz_support
    def test_timezone_templatetag(self):
        """
        Test the {% timezone %} templatetag.
        """
        # Nested {% timezone %} blocks: tz1 (ICT) applies in the outer block
        # and tz2 (None, i.e. the default TIME_ZONE) in the inner one, as the
        # three rendered offsets (+03:00, +07:00, +03:00) demonstrate.
        tpl = Template(
            "{% load tz %}"
            "{{ dt }}|"
            "{% timezone tz1 %}"
            "{{ dt }}|"
            "{% timezone tz2 %}"
            "{{ dt }}"
            "{% endtimezone %}"
            "{% endtimezone %}"
        )
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
                       'tz1': ICT, 'tz2': None})
        self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@requires_pytz
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_get_current_timezone_templatetag(self):
        """
        Test the {% get_current_timezone %} templatetag.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        # With pytz the default zone renders as its IANA name; without pytz
        # it renders via the local tzinfo object's abbreviation.
        self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context()), "UTC")
        # Inside {% timezone %} the "current" zone is the scoped one; the
        # fixed-offset ICT object renders as "+0700".
        tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@requires_pytz
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_tz_template_context_processor(self):
        """
        Test the django.template.context_processors.tz template context processor.
        """
        tpl = Template("{{ TIME_ZONE }}")
        # A plain Context does not carry the TIME_ZONE variable.
        context = Context()
        self.assertEqual(tpl.render(context), "")
        # A RequestContext run through the tz processor exposes it.
        request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
        self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
    def test_date_and_time_template_filters_honor_localtime(self):
        # Inside {% localtime off %} the date/time filters must format the
        # value as-is, regardless of the active time zone.
        tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
    def test_localtime_with_time_zone_setting_set_to_none(self):
        # Regression for #17274
        # Rendering must not crash when TIME_ZONE is None.
        tpl = Template("{% load tz %}{{ dt }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
        with self.settings(TIME_ZONE=None):
            # the actual value depends on the system time zone of the host
            self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
    """With USE_TZ=False, forms parse datetimes as naive values and perform
    no time-zone conversion at all."""
    def test_form(self):
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
    @requires_pytz
    def test_form_with_non_existent_time(self):
        # 02:30 on 2011-03-27 falls in the Europe/Paris DST gap, but naive
        # handling never notices.
        form = EventForm({'dt': '2011-03-27 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
    @requires_pytz
    def test_form_with_ambiguous_time(self):
        # 02:30 on 2011-10-30 occurs twice in Europe/Paris (DST end); naive
        # handling accepts it unchanged.
        form = EventForm({'dt': '2011-10-30 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
    def test_split_form(self):
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
    def test_model_form(self):
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
    """With USE_TZ=True, form input is interpreted in the current time zone
    and normalized to an aware UTC datetime."""
    @requires_tz_support
    def test_form(self):
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    def test_form_with_other_timezone(self):
        # Input parsed in the overridden zone (ICT, UTC+7), stored as UTC.
        form = EventForm({'dt': '2011-09-01 17:20:30'})
        with timezone.override(ICT):
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    def test_form_with_explicit_timezone(self):
        form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime inputs formats don't allow providing a time zone.
        self.assertFalse(form.is_valid())
    @requires_pytz
    def test_form_with_non_existent_time(self):
        # A datetime inside the DST gap cannot be made aware -> form error.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-03-27 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                             ["2011-03-27 02:30:00 couldn't be interpreted in time zone "
                              "Europe/Paris; it may be ambiguous or it may not exist."])
    @requires_pytz
    def test_form_with_ambiguous_time(self):
        # A datetime occurring twice at DST end is rejected the same way.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-10-30 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                             ["2011-10-30 02:30:00 couldn't be interpreted in time zone "
                              "Europe/Paris; it may be ambiguous or it may not exist."])
    @requires_tz_support
    def test_split_form(self):
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    @requires_tz_support
    def test_localized_form(self):
        form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
        with timezone.override(ICT):
            self.assertIn("2011-09-01 17:20:30", str(form))
    @requires_tz_support
    def test_model_form(self):
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
    @requires_tz_support
    def test_localized_model_form(self):
        form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
        with timezone.override(ICT):
            self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
                   PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
    """Admin change-list and change-form pages render datetimes converted to
    the current time zone."""
    @classmethod
    def setUpTestData(cls):
        # password = "secret"
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
            is_superuser=True, username='super', first_name='Super', last_name='User',
            email='super@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
        )
    def setUp(self):
        self.client.login(username='super', password='secret')
    @requires_tz_support
    def test_changelist(self):
        # Default zone: values shown converted to EAT.
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(EAT).isoformat())
    def test_changelist_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
            self.assertContains(response, e.dt.astimezone(ICT).isoformat())
    @requires_tz_support
    def test_change_editable(self):
        # Editable fields are rendered as separate local date and time parts.
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
        self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
    def test_change_editable_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
            self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
            self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
    @requires_tz_support
    def test_change_readonly(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(EAT).isoformat())
    def test_change_readonly_in_other_timezone(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        with timezone.override(ICT):
            response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
            self.assertContains(response, t.created.astimezone(ICT).isoformat())
| bsd-3-clause |
sudosurootdev/external_chromium_org | tools/gyp-explain.py | 153 | 3035 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
  """Prints a short command-line usage message to stdout."""
  print """\
Usage:
  tools/gyp-explain.py [--dot] chrome_dll# gtest#
"""
def GetPath(graph, fro, to):
  """Given a graph in (node -> list of successor nodes) dictionary format,
  yields all simple paths from |fro| to |to|, starting with the shortest.

  Nodes already on the current path are not revisited, so the traversal also
  terminates on graphs that contain cycles. On acyclic graphs (the expected
  gyp dependency case) the output is unchanged, since a simple path can never
  contain the same node twice anyway.
  """
  # Storing full paths in the queue is a bit wasteful, but good enough for this.
  q = deque([(fro, [])])
  while q:
    t, path = q.popleft()
    if t == to:
      yield path + [t]
    for d in graph[t]:
      # Skip successors that would revisit a node on this path (cycle guard).
      if d != t and d not in path:
        q.append((d, path + [t]))
def MatchNode(graph, substring):
  """Given a dictionary, returns the key that matches |substring| best. Exits
  if there's not one single best match."""
  # "Best" here simply means unique: collect every key containing the
  # substring and require exactly one hit.
  candidates = []
  for target in graph:
    if substring in target:
      candidates.append(target)
  if not candidates:
    print 'No targets match "%s"' % substring
    sys.exit(1)
  if len(candidates) > 1:
    print 'More than one target matches "%s": %s' % (
        substring, ' '.join(candidates))
    sys.exit(1)
  return candidates[0]
def EscapeForDot(string):
  """Quotes |string| as a dot node name, dropping the '#target' suffix."""
  suffix = '#target'
  if string.endswith(suffix):
    string = string[:-len(suffix)]
  # Backslashes must be doubled inside a quoted dot identifier.
  return '"%s"' % string.replace('\\', '\\\\')
def GenerateDot(fro, to, paths):
  """Generates an input file for graphviz's dot program."""
  # Strip the longest common prefix of all nodes to keep the labels short.
  prefixes = [os.path.commonprefix(path) for path in paths]
  prefix = os.path.commonprefix(prefixes)
  print '// Build with "dot -Tpng -ooutput.png this_file.dot"'
  # "strict" collapses common paths.
  print 'strict digraph {'
  for path in paths:
    print (' -> '.join(EscapeForDot(item[len(prefix):]) for item in path)), ';'
  print '}'
def Main(argv):
  """Entry point: validates dump.json, then prints or plots target paths."""
  # Check that dump.json exists and that it's not too old.
  dump_json_dirty = False
  try:
    st = os.stat('dump.json')
    file_age_s = time.time() - st.st_mtime
    if file_age_s > 2 * 60 * 60:
      print 'dump.json is more than 2 hours old.'
      dump_json_dirty = True
  except OSError:
    print 'dump.json not found.'
    dump_json_dirty = True
  if dump_json_dirty:
    print 'Run'
    print '  GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
    print 'first, then try again.'
    sys.exit(1)
  g = json.load(open('dump.json'))
  if len(argv) not in (3, 4):
    usage()
    sys.exit(1)
  # Optional --dot flag switches from text output to graphviz output.
  generate_dot = argv[1] == '--dot'
  if generate_dot:
    argv.pop(1)
  # Resolve the two substrings to unique target names (exits on ambiguity).
  fro = MatchNode(g, argv[1])
  to = MatchNode(g, argv[2])
  paths = list(GetPath(g, fro, to))
  if len(paths) > 0:
    if generate_dot:
      GenerateDot(fro, to, paths)
    else:
      print 'These paths lead from %s to %s:' % (fro, to)
      for path in paths:
        print ' -> '.join(path)
  else:
    print 'No paths found from %s to %s.' % (fro, to)
# Script entry point.
if __name__ == '__main__':
  Main(sys.argv)
| bsd-3-clause |
podemos-info/odoo | addons/hr_timesheet_sheet/__openerp__.py | 9 | 2992 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Timesheets Validation',
'version': '1.0',
'category': 'Human Resources',
"sequence": 16,
'description': """
This module helps you to easily encode and validate timesheet and attendances within the same view.
===================================================================================================
The upper part of the view is for attendances and track (sign in/sign out) events.
The lower part is for timesheet.
Other tabs contains statistics views to help you analyse your
time or the time of your team:
* Time spent by day (with attendances)
* Time spent by project
This module also implements a complete timesheet validation process:
* Draft sheet
* Confirmation at the end of the period by the employee
* Validation by the project manager
The validation can be configured in the company:
* Period size (day, week, month, year)
* Maximal difference between timesheet and attendances
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_my_timesheet.jpeg','images/hr_timesheet_analysis.jpeg','images/hr_timesheet_sheet_analysis.jpeg','images/hr_timesheets.jpeg'],
'depends': ['hr_timesheet', 'hr_timesheet_invoice', 'process'],
'init_xml': [],
'update_xml': [
'security/ir.model.access.csv',
'security/hr_timesheet_sheet_security.xml',
'hr_timesheet_sheet_view.xml',
'hr_timesheet_workflow.xml',
'process/hr_timesheet_sheet_process.xml',
'board_hr_timesheet_view.xml',
'report/hr_timesheet_report_view.xml',
'report/timesheet_report_view.xml',
'wizard/hr_timesheet_current_view.xml',
'hr_timesheet_sheet_data.xml'
],
'demo_xml': ['hr_timesheet_sheet_demo.xml',
],
'test':['test/test_hr_timesheet_sheet.yml'],
'installable': True,
'auto_install': False,
'certificate': '0073297700829',
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aldanor/pytest-benchmark | src/pytest_benchmark/histogram.py | 1 | 3331 | from pytest_benchmark.utils import time_unit
try:
from pygal.graph.box import Box
from pygal.graph.box import is_list_like
from pygal.style import DefaultStyle
except ImportError as exc:
raise ImportError(exc.args, "Please install pygal and pygaljs or pytest-benchmark[histogram]")
class Plot(Box):
    """A pygal box plot specialized for benchmark data.

    Each serie is expected to already contain the seven precomputed
    statistics [min, ld15iqr, q1, median, q3, hd15iqr, max], so the usual
    box-point computation is bypassed.
    """
    def __init__(self, annotations, *args, **kwargs):
        # annotations: mapping of x-label -> {"name": ..., ...}, consumed in
        # _tooltip_data to enrich the tooltip.
        super(Plot, self).__init__(*args, **kwargs)
        self.annotations = annotations
    def _box_points(self, serie, _):
        # The serie already holds the box statistics; the second element is
        # presumably the outlier/whisker pair (min, max) per pygal's Box
        # API — TODO confirm against the pinned pygal version.
        return serie, [serie[0], serie[6]]
    def _format(self, x):
        # super(Box, self): deliberately skip Box's own formatter and fall
        # back to the plain Graph implementation for scalar values.
        sup = super(Box, self)._format
        if is_list_like(x):
            return "Min: {0[0]:.4f}\n" \
                   "Q1-1.5IQR: {0[1]:.4f}\n" \
                   "Q1: {0[2]:.4f}\nMedian: {0[3]:.4f}\nQ3: {0[4]:.4f}\n" \
                   "Q3+1.5IQR: {0[5]:.4f}\n" \
                   "Max: {0[6]:.4f}".format(x[:7])
        else:
            return sup(x)
    def _tooltip_data(self, node, value, x, y, classes=None, xlabel=None):
        # NOTE(review): xlabel=None is forwarded to the parent (instead of
        # the received xlabel), apparently to suppress pygal's own label;
        # the annotation name is added as a separate <desc> node below.
        super(Plot, self)._tooltip_data(node, value, x, y, classes=classes, xlabel=None)
        if xlabel in self.annotations:
            self.svg.node(node, 'desc', class_="x_label").text = self.annotations[xlabel]["name"]
def make_plot(bench_name, table, compare, current, annotations, sort):
    """Builds a pygal box Plot for one benchmark across several saved runs.

    table is an iterable of (label, row) pairs where row maps statistic
    names ("min", "q1", ...) to values in seconds.
    """
    class Style(DefaultStyle):
        colors = []
        font_family = 'Consolas, "Deja Vu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace'
        # NOTE: this loop runs in the class body at class-creation time,
        # filling the class-level `colors` list: the current run gets the
        # first theme color, the compared-against run the third, everything
        # else black.
        for label, row in table:
            if label == current:
                colors.append(DefaultStyle.colors[0])
            elif compare and str(compare.basename).startswith(label):
                colors.append(DefaultStyle.colors[2])
            else:
                colors.append("#000000")
    # Pick a readable time unit from the smallest value of the sort column,
    # and the multiplier that converts seconds into that unit.
    unit, adjustment = time_unit(min(
        row[sort]
        for _, row in table
    ))
    # Y-axis range: clip the top whisker so extreme outliers don't flatten
    # the plot.
    minimum = int(min(row["min"] * adjustment for _, row in table))
    maximum = int(max(
        min(row["max"], row["hd15iqr"]) * adjustment
        for _, row in table
    ) + 1)
    # Interactive tooltips JS is optional (only if pygaljs is installed).
    try:
        import pygaljs
    except ImportError:
        opts = {}
    else:
        opts = {
            "js": [
                pygaljs.uri("2.0.x", "pygal-tooltips.js")
            ]
        }
    plot = Plot(
        annotations,
        box_mode='tukey',
        x_label_rotation=-90,
        x_labels=[label for label, _ in table],
        show_legend=False,
        title="Speed in %sseconds of %s" % (unit, bench_name),
        x_title="Trial",
        y_title="%ss" % unit,
        style=Style,
        min_scale=20,
        max_scale=20,
        range=(minimum, maximum),
        zero=minimum,
        css=[
            "file://style.css",
            "file://graph.css",
            """inline:
              .axis.x text {
                text-anchor: middle !important;
              }
              .tooltip .value {
                font-size: 1em !important;
              }
            """
        ],
        **opts
    )
    # One serie per saved run; annotated runs get timestamp/rounds appended
    # to their label.
    for label, row in table:
        if label in annotations:
            label += "\n@%s - %s rounds" % (annotations[label]["datetime"], row["rounds"])
        serie = [row[field] * adjustment for field in ["min", "ld15iqr", "q1", "median", "q3", "hd15iqr", "max"]]
        plot.add(label, serie)
    return plot
| bsd-2-clause |
zhaochl/python-utils | verify_code/Imaging-1.1.7/build/lib.linux-x86_64-2.7/TiffTags.py | 44 | 4641 | #
# The Python Imaging Library.
# $Id$
#
# TIFF tags
#
# This module provides clear-text names for various well-known
# TIFF tags. the TIFF codec works just fine without it.
#
# Copyright (c) Secret Labs AB 1999.
#
# See the README file for information on usage and redistribution.
#
##
# This module provides constants and clear-text names for various
# well-known TIFF tags.
##
##
# Map tag numbers (or tag number, tag value tuples) to tag names.
TAGS = {
    254: "NewSubfileType",
    255: "SubfileType",
    256: "ImageWidth",
    257: "ImageLength",
    258: "BitsPerSample",
    259: "Compression",
    (259, 1): "Uncompressed",
    (259, 2): "CCITT 1d",
    (259, 3): "Group 3 Fax",
    (259, 4): "Group 4 Fax",
    (259, 5): "LZW",
    (259, 6): "JPEG",
    (259, 32773): "PackBits",
    262: "PhotometricInterpretation",
    (262, 0): "WhiteIsZero",
    (262, 1): "BlackIsZero",
    (262, 2): "RGB",
    (262, 3): "RGB Palette",
    (262, 4): "Transparency Mask",
    (262, 5): "CMYK",
    (262, 6): "YCbCr",
    (262, 8): "CieLAB",
    (262, 32803): "CFA", # TIFF/EP, Adobe DNG
    (262, 32892): "LinearRaw", # Adobe DNG
    263: "Thresholding",
    264: "CellWidth",
    265: "CellHeight",
    266: "FillOrder",
    269: "DocumentName",
    270: "ImageDescription",
    271: "Make",
    272: "Model",
    273: "StripOffsets",
    274: "Orientation",
    277: "SamplesPerPixel",
    278: "RowsPerStrip",
    279: "StripByteCounts",
    280: "MinSampleValue",
    281: "MaxSampleValue",
    282: "XResolution",
    283: "YResolution",
    284: "PlanarConfiguration",
    (284, 1): "Contiguous",  # fixed spelling (was "Contigous")
    (284, 2): "Separate",
    285: "PageName",
    286: "XPosition",
    287: "YPosition",
    288: "FreeOffsets",
    289: "FreeByteCounts",
    290: "GrayResponseUnit",
    291: "GrayResponseCurve",
    292: "T4Options",
    293: "T6Options",
    296: "ResolutionUnit",
    297: "PageNumber",
    301: "TransferFunction",
    305: "Software",
    306: "DateTime",
    315: "Artist",
    316: "HostComputer",
    317: "Predictor",
    318: "WhitePoint",
    319: "PrimaryChromaticities",  # fixed spelling (was "PrimaryChromaticies")
    320: "ColorMap",
    321: "HalftoneHints",
    322: "TileWidth",
    323: "TileLength",
    324: "TileOffsets",
    325: "TileByteCounts",
    332: "InkSet",
    333: "InkNames",
    334: "NumberOfInks",
    336: "DotRange",
    337: "TargetPrinter",
    338: "ExtraSamples",
    339: "SampleFormat",
    340: "SMinSampleValue",
    341: "SMaxSampleValue",
    342: "TransferRange",
    347: "JPEGTables",
    # obsolete JPEG tags
    512: "JPEGProc",
    513: "JPEGInterchangeFormat",
    514: "JPEGInterchangeFormatLength",
    515: "JPEGRestartInterval",
    517: "JPEGLosslessPredictors",
    518: "JPEGPointTransforms",
    519: "JPEGQTables",
    520: "JPEGDCTables",
    521: "JPEGACTables",
    529: "YCbCrCoefficients",
    530: "YCbCrSubSampling",
    531: "YCbCrPositioning",
    532: "ReferenceBlackWhite",
    # XMP
    700: "XMP",
    33432: "Copyright",
    # various extensions (should check specs for "official" names)
    33723: "IptcNaaInfo",
    34377: "PhotoshopInfo",
    # Exif IFD
    34665: "ExifIFD",
    # ICC Profile
    34675: "ICCProfile",
    # Adobe DNG
    50706: "DNGVersion",
    50707: "DNGBackwardVersion",
    50708: "UniqueCameraModel",
    50709: "LocalizedCameraModel",
    50710: "CFAPlaneColor",
    50711: "CFALayout",
    50712: "LinearizationTable",
    50713: "BlackLevelRepeatDim",
    50714: "BlackLevel",
    50715: "BlackLevelDeltaH",
    50716: "BlackLevelDeltaV",
    50717: "WhiteLevel",
    50718: "DefaultScale",
    # BestQualityScale is DNG tag 50780 (0xC65C); the original used key
    # 50741, which collided with — and was silently overwritten by — the
    # MakerNoteSafety entry near the end of this dict.
    50780: "BestQualityScale",
    50719: "DefaultCropOrigin",
    50720: "DefaultCropSize",
    50778: "CalibrationIlluminant1",
    50779: "CalibrationIlluminant2",
    50721: "ColorMatrix1",
    50722: "ColorMatrix2",
    50723: "CameraCalibration1",
    50724: "CameraCalibration2",
    50725: "ReductionMatrix1",
    50726: "ReductionMatrix2",
    50727: "AnalogBalance",
    50728: "AsShotNeutral",
    50729: "AsShotWhiteXY",
    50730: "BaselineExposure",
    50731: "BaselineNoise",
    50732: "BaselineSharpness",
    50733: "BayerGreenSplit",
    50734: "LinearResponseLimit",
    50735: "CameraSerialNumber",
    50736: "LensInfo",
    50737: "ChromaBlurRadius",
    50738: "AntiAliasStrength",
    50740: "DNGPrivateData",
    50741: "MakerNoteSafety",
}
##
# Map type numbers to type names.
TYPES = {
    # Numeric TIFF field-type code -> human-readable name.
    1: "byte",
    2: "ascii",
    3: "short",
    4: "long",
    5: "rational",
    6: "signed byte",
    7: "undefined",
    8: "signed short",
    9: "signed long",
    10: "signed rational",
    11: "float",
    12: "double",
}
| apache-2.0 |
surgebiswas/poker | PokerBots_2017/Johnny/scipy/stats/tests/test_mstats_extras.py | 126 | 4761 | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for maskedArray statistics.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, print_function, absolute_import
__author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)"
import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
#import scipy.stats.mmorestats as mms
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_
class TestMisc(TestCase):
    """Tests for miscellaneous robust estimators in scipy.stats.mstats."""
    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)
    def test_mjci(self):
        "Tests the Marits-Jarrett estimator"
        # Reference standard errors for the three quartiles of this sample.
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5)
    def test_trimmedmeanci(self):
        "Tests the confidence intervals of the trimmed mean."
        data = ma.array([545,555,558,572,575,576,578,580,
                         594,605,635,651,653,661,666])
        assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
        assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1),
                     [561.8, 630.6])
    def test_idealfourths(self):
        "Tests ideal-fourths"
        test = np.arange(100)
        assert_almost_equal(np.asarray(ms.idealfourths(test)),
                            [24.416667,74.583333],6)
        # 2-D input: results per column (axis=0) and per row (axis=1).
        test_2D = test.repeat(3).reshape(-1,3)
        assert_almost_equal(ms.idealfourths(test_2D, axis=0),
                            [[24.416667,24.416667,24.416667],
                             [74.583333,74.583333,74.583333]],6)
        assert_almost_equal(ms.idealfourths(test_2D, axis=1),
                            test.repeat(2).reshape(-1,2))
        # Degenerate input yields NaNs rather than raising.
        test = [0,0]
        _result = ms.idealfourths(test)
        assert_(np.isnan(_result).all())
#..............................................................................
class TestQuantiles(TestCase):
    """Tests for the Harrell-Davis quantile estimators in scipy.stats.mstats."""
    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)
    def test_hdquantiles(self):
        # 100 fixed uniform-looking samples; the expected quantiles below are
        # precomputed reference values for exactly this data.
        data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
                0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
                0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
                0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
                0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
                0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
                0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
                0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
                0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
                0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
                0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
                0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
                0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
                0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
                0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
                0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
                0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
                0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
                0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
                0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
        #
        # Extreme probabilities reduce to the sample min/max.
        assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
                            [0.006514031, 0.995309248])
        hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
        assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
        hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75])
        assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4)
        #
        # 2-D input: axis=0 results must match the per-column 1-D results.
        data = np.array(data).reshape(10,10)
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
        assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
        assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
        assert_almost_equal(hdq[...,0],
                            ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
        assert_almost_equal(hdq[...,-1],
                            ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
###############################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    run_module_suite()
| mit |
thaim/ansible | lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py | 20 | 6543 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlconfiguration_info
version_added: "2.9"
short_description: Get Azure MySQL Configuration facts
description:
- Get facts of Azure MySQL Configuration.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
type: str
server_name:
description:
- The name of the server.
required: True
type: str
name:
description:
- Setting name.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Get specific setting of MySQL Server
azure_rm_mysqlconfiguration_info:
resource_group: myResourceGroup
server_name: testmysqlserver
name: deadlock_timeout
- name: Get all settings of MySQL Server
azure_rm_mysqlconfiguration_info:
resource_group: myResourceGroup
server_name: server_name
'''
RETURN = '''
settings:
description:
- A list of dictionaries containing MySQL Server settings.
returned: always
type: complex
contains:
id:
description:
- Setting resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testmysqlser
ver/configurations/deadlock_timeout"
name:
description:
- Setting name.
returned: always
type: str
sample: deadlock_timeout
value:
description:
- Setting value.
returned: always
type: raw
sample: 1000
description:
description:
- Description of the configuration.
returned: always
type: str
sample: Deadlock timeout.
source:
description:
- Source of the configuration.
returned: always
type: str
sample: system-default
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMMySqlConfigurationInfo(AzureRMModuleBase):
    """Info module: query configuration settings of an Azure MySQL server.

    Returns either a single named setting or every setting on the server,
    as a list of flat dicts under ``results['settings']``.
    """
    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.resource_group = None
        self.server_name = None
        self.name = None
        # supports_tags=False: configuration settings are not taggable resources
        super(AzureRMMySqlConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
    def exec_module(self, **kwargs):
        """Main module execution path.

        Copies validated parameters onto self, builds the management client,
        then dispatches to get() (single setting) or list_by_server().
        """
        # Emit a deprecation warning when invoked under the old *_facts alias.
        is_old_facts = self.module._name == 'azure_rm_mysqlconfiguration_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mysqlconfiguration_facts' module has been renamed to 'azure_rm_mysqlconfiguration_info'", version='2.13')
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        # 'name' present -> one specific setting; absent -> all settings.
        if self.name is not None:
            self.results['settings'] = self.get()
        else:
            self.results['settings'] = self.list_by_server()
        return self.results
    def get(self):
        '''
        Gets facts of the specified MySQL Configuration setting.

        :return: list with zero or one formatted setting dicts (empty on error)
        '''
        response = None
        results = []
        try:
            response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
                                                           server_name=self.server_name,
                                                           configuration_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # Lookup failures are logged, not fatal: an empty list is returned.
            self.log('Could not get facts for Configurations.')
        if response is not None:
            results.append(self.format_item(response))
        return results
    def list_by_server(self):
        '''
        Gets facts for all MySQL Configuration settings of the server.

        :return: list of formatted setting dicts (empty on error)
        '''
        response = None
        results = []
        try:
            response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
                                                                      server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # As in get(): best-effort facts, errors degrade to an empty list.
            self.log('Could not get facts for Configurations.')
        if response is not None:
            for item in response:
                results.append(self.format_item(item))
        return results
    def format_item(self, item):
        """Flatten an SDK Configuration object into the documented RETURN shape."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'id': d['id'],
            'name': d['name'],
            'value': d['value'],
            'description': d['description'],
            'source': d['source']
        }
        return d
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMySqlConfigurationInfo()
if __name__ == '__main__':
main()
| mit |
Tatsh-ansible/ansible | lib/ansible/modules/cloud/amazon/s3_website.py | 50 | 11115 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_website
short_description: Configure an s3 bucket as a website
description:
- Configure an s3 bucket as a website
version_added: "2.2"
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket"
required: true
default: null
error_key:
description:
- "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
required: false
default: null
redirect_all_requests:
description:
- "Describes the redirect behavior for every request to this s3 bucket website endpoint"
required: false
default: null
region:
description:
- >
AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
S3 Location: US Standard.
required: false
default: null
state:
description:
- "Add or remove s3 website configuration"
required: false
default: present
choices: [ 'present', 'absent' ]
suffix:
description:
- >
Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
character.
required: false
default: index.html
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure an s3 bucket to redirect all requests to example.com
- s3_website:
name: mybucket.com
redirect_all_requests: example.com
state: present
# Remove website configuration from an s3 bucket
- s3_website:
name: mybucket.com
state: absent
# Configure an s3 bucket as a website with index and error pages
- s3_website:
name: mybucket.com
suffix: home.htm
error_key: errors/404.htm
state: present
'''
RETURN = '''
index_document:
description: index document
type: complex
returned: always
contains:
suffix:
description: suffix that is appended to a request that is for a directory on the website endpoint
returned: success
type: string
sample: index.html
error_document:
description: error document
type: complex
returned: always
contains:
key:
description: object key name to use when a 4XX class error occurs
returned: when error_document parameter set
type: string
sample: error.html
redirect_all_requests_to:
description: where to redirect requests
type: complex
returned: always
contains:
host_name:
description: name of the host where requests will be redirected.
returned: when redirect all requests parameter set
type: string
sample: ansible.com
routing_rules:
description: routing rules
type: complex
returned: always
contains:
routing_rule:
host_name:
description: name of the host where requests will be redirected.
returned: when host name set as part of redirect rule
type: string
sample: ansible.com
condition:
key_prefix_equals:
description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be
ExamplePage.html
returned: when routing rule present
type: string
sample: docs/
redirect:
replace_key_prefix_with:
description: object key prefix to use in the redirect request
returned: when routing rule present
type: string
sample: documents/
'''
import time
try:
from botocore.exceptions import ClientError, ParamValidationError, NoCredentialsError
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def _create_redirect_dict(url):
redirect_dict = {}
url_split = url.split(':')
# Did we split anything?
if len(url_split) == 2:
redirect_dict[u'Protocol'] = url_split[0]
redirect_dict[u'HostName'] = url_split[1].replace('//', '')
elif len(url_split) == 1:
redirect_dict[u'HostName'] = url_split[0]
else:
raise ValueError('Redirect URL appears invalid')
return redirect_dict
def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}
if error_key is not None:
website_configuration['ErrorDocument'] = { 'Key': error_key }
if suffix is not None:
website_configuration['IndexDocument'] = { 'Suffix': suffix }
if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
return website_configuration
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
    """Create or update the website configuration on the bucket (state=present).

    Exits via module.exit_json with the refreshed configuration, or
    module.fail_json on AWS/validation errors.  Never returns normally.
    """
    bucket_name = module.params.get("name")
    redirect_all_requests = module.params.get("redirect_all_requests")
    # If redirect_all_requests is set then don't use the default suffix that has been set
    if redirect_all_requests is not None:
        suffix = None
    else:
        suffix = module.params.get("suffix")
    error_key = module.params.get("error_key")
    changed = False
    try:
        bucket_website = resource_connection.BucketWebsite(bucket_name)
    except ClientError as e:
        # NOTE(review): e.message is Python-2 only; str(e) would be portable.
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    # Fetch the current configuration; 'NoSuchWebsiteConfiguration' means
    # the bucket has none yet and we must create one from scratch.
    try:
        website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            website_config = None
        else:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    if website_config is None:
        # No existing configuration: always write one.
        try:
            bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
            changed = True
        except (ClientError, ParamValidationError) as e:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except ValueError as e:
            # Raised by _create_redirect_dict for a malformed redirect URL.
            module.fail_json(msg=str(e))
    else:
        # Existing configuration: rewrite only if any requested field differs.
        # A KeyError means the existing config lacks a section we now need,
        # which also requires a rewrite.
        try:
            if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
               (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
               (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
                try:
                    bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
                    changed = True
                except (ClientError, ParamValidationError) as e:
                    module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except KeyError as e:
            try:
                bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
                changed = True
            except (ClientError, ParamValidationError) as e:
                module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except ValueError as e:
            module.fail_json(msg=str(e))
    # Wait 5 secs before getting the website_config again to give it time to update
    time.sleep(5)
    website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
def disable_bucket_as_website(client_connection, module):
    """Remove any website configuration from the bucket (state=absent).

    Exits via module.exit_json (changed=False when nothing was configured,
    changed=True after a successful delete) or module.fail_json on AWS
    errors.  Never returns normally.
    """
    bucket = module.params.get("name")
    # Probe first: a missing website configuration means there is nothing
    # to delete, so report no change.
    try:
        client_connection.get_bucket_website(Bucket=bucket)
    except ClientError as err:
        if err.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=err.message, **camel_dict_to_snake_dict(err.response))
    try:
        client_connection.delete_bucket_website(Bucket=bucket)
    except ClientError as err:
        module.fail_json(msg=err.message, **camel_dict_to_snake_dict(err.response))
    module.exit_json(changed=True)
def main():
    """Module entry point: parse arguments, build boto3 connections, dispatch.

    'redirect_all_requests' is mutually exclusive with both 'suffix' and
    'error_key' because S3 rejects a redirect-all configuration that also
    names index/error documents.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['present', 'absent']),
            suffix=dict(type='str', required=False, default='index.html'),
            error_key=dict(type='str', required=False),
            redirect_all_requests=dict(type='str', required=False)
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [
            ['redirect_all_requests', 'suffix'],
            ['redirect_all_requests', 'error_key']
        ])
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if region:
        # Both a low-level client and a resource object are needed: reads use
        # the client, writes go through the BucketWebsite resource.
        client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
        resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")
    state = module.params.get("state")
    if state == 'present':
        enable_or_update_bucket_as_website(client_connection, resource_connection, module)
    elif state == 'absent':
        disable_bucket_as_website(client_connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
DaveA50/lbry | lbrynet/dht/protocol.py | 1 | 14824 | #!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import logging
import binascii
import time
from twisted.internet import protocol, defer
from twisted.python import failure
import twisted.internet.reactor
import constants
import encoding
import msgtypes
import msgformat
from contact import Contact
reactor = twisted.internet.reactor
log = logging.getLogger(__name__)
class TimeoutError(Exception):
    """Raised when an RPC to a remote contact does not complete in time.

    The offending contact's raw ID is kept on ``remote_contact_id`` so
    callers can identify (and e.g. evict) the unresponsive node.
    """
    def __init__(self, remote_contact_id):
        # The contact ID is a raw binary blob; hex-encode it so the
        # exception message is printable.
        readable_id = binascii.hexlify(remote_contact_id)
        Exception.__init__(self, 'Timeout connecting to {}'.format(readable_id))
        self.remote_contact_id = remote_contact_id
class KademliaProtocol(protocol.DatagramProtocol):
    """ Implements all low-level network-related functions of a Kademlia node """
    # 26 bytes per datagram are reserved for the multi-packet header
    # described in _send() (1 type + 2 total + 2 seq + 20 RPC ID + 1 end).
    msgSizeLimit = constants.udpDatagramMaxSize-26
    maxToSendDelay = 10**-3#0.05
    minToSendDelay = 10**-5#0.01
    def __init__(self, node, msgEncoder=encoding.Bencode(), msgTranslator=msgformat.DefaultFormat()):
        # NOTE(review): the encoder/translator defaults are single objects
        # created at class-definition time and shared by every protocol
        # instance that uses the defaults; they appear stateless, but
        # confirm before relying on that.
        self._node = node
        self._encoder = msgEncoder
        self._translator = msgTranslator
        self._sentMessages = {}  # msgID -> (contactID, Deferred, timeoutCall)
        self._partialMessages = {}  # msgID -> {seqNumber: payload} awaiting reassembly
        self._partialMessagesProgress = {}
        self._next = 0  # earliest time.time() at which the next packet may go out
        self._callLaterList = {}  # pending delayed transmit calls, keyed by send time
    def sendRPC(self, contact, method, args, rawResponse=False):
        """ Sends an RPC to the specified contact
        @param contact: The contact (remote node) to send the RPC to
        @type contact: kademlia.contacts.Contact
        @param method: The name of remote method to invoke
        @type method: str
        @param args: A list of (non-keyword) arguments to pass to the remote
                     method, in the correct order
        @type args: tuple
        @param rawResponse: If this is set to C{True}, the caller of this RPC
                            will receive a tuple containing the actual response
                            message object and the originating address tuple as
                            a result; in other words, it will not be
                            interpreted by this class. Unless something special
                            needs to be done with the metadata associated with
                            the message, this should remain C{False}.
        @type rawResponse: bool
        @return: This immediately returns a deferred object, which will return
                 the result of the RPC call, or raise the relevant exception
                 if the remote node raised one. If C{rawResponse} is set to
                 C{True}, however, it will always return the actual response
                 message (which may be a C{ResponseMessage} or an
                 C{ErrorMessage}).
        @rtype: twisted.internet.defer.Deferred
        """
        msg = msgtypes.RequestMessage(self._node.id, method, args)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)
        df = defer.Deferred()
        if rawResponse:
            df._rpcRawResponse = True
        # Set the RPC timeout timer
        timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, msg.id) #IGNORE:E1101
        # Transmit the data
        self._send(encodedMsg, msg.id, (contact.address, contact.port))
        self._sentMessages[msg.id] = (contact.id, df, timeoutCall)
        return df
    def datagramReceived(self, datagram, address):
        """ Handles and parses incoming RPC messages (and responses)
        @note: This is automatically called by Twisted when the protocol
               receives a UDP datagram
        """
        if datagram[0] == '\x00' and datagram[25] == '\x00':
            # Fragment of a multi-packet transmission (see _send() for the
            # header layout): stash it and only proceed once every fragment
            # for this msgID has arrived.
            totalPackets = (ord(datagram[1]) << 8) | ord(datagram[2])
            msgID = datagram[5:25]
            seqNumber = (ord(datagram[3]) << 8) | ord(datagram[4])
            if msgID not in self._partialMessages:
                self._partialMessages[msgID] = {}
            self._partialMessages[msgID][seqNumber] = datagram[26:]
            if len(self._partialMessages[msgID]) == totalPackets:
                # All fragments present: reassemble in sequence order.
                keys = self._partialMessages[msgID].keys()
                keys.sort()
                data = ''
                for key in keys:
                    data += self._partialMessages[msgID][key]
                datagram = data
                del self._partialMessages[msgID]
            else:
                return
        try:
            msgPrimitive = self._encoder.decode(datagram)
        except encoding.DecodeError:
            # We received some rubbish here
            return
        except IndexError:
            log.warning("Couldn't decode dht datagram from %s", address)
            return
        message = self._translator.fromPrimitive(msgPrimitive)
        remoteContact = Contact(message.nodeID, address[0], address[1], self)
        # Refresh the remote node's details in the local node's k-buckets
        self._node.addContact(remoteContact)
        if isinstance(message, msgtypes.RequestMessage):
            # This is an RPC method request
            self._handleRPC(remoteContact, message.id, message.request, message.args)
        elif isinstance(message, msgtypes.ResponseMessage):
            # Find the message that triggered this response
            if self._sentMessages.has_key(message.id):
                # Cancel timeout timer for this RPC
                df, timeoutCall = self._sentMessages[message.id][1:3]
                timeoutCall.cancel()
                del self._sentMessages[message.id]
                if hasattr(df, '_rpcRawResponse'):
                    # The RPC requested that the raw response message and originating address be returned; do not interpret it
                    df.callback((message, address))
                elif isinstance(message, msgtypes.ErrorMessage):
                    # The RPC request raised a remote exception; raise it locally
                    if message.exceptionType.startswith('exceptions.'):
                        exceptionClassName = message.exceptionType[11:]
                    else:
                        localModuleHierarchy = self.__module__.split('.')
                        remoteHierarchy = message.exceptionType.split('.')
                        #strip the remote hierarchy
                        while remoteHierarchy[0] == localModuleHierarchy[0]:
                            remoteHierarchy.pop(0)
                            localModuleHierarchy.pop(0)
                        exceptionClassName = '.'.join(remoteHierarchy)
                    remoteException = None
                    try:
                        # NOTE: the 'exec' statement (plus has_key/print/comma
                        # except-clauses elsewhere) makes this module
                        # Python 2 only.  Evaluating a peer-supplied class
                        # name here also trusts the remote node's input.
                        exec 'remoteException = %s("%s")' % (exceptionClassName, message.response)
                    except Exception:
                        # We could not recreate the exception; create a generic one
                        remoteException = Exception(message.response)
                    df.errback(remoteException)
                else:
                    # We got a result from the RPC
                    df.callback(message.response)
            else:
                # If the original message isn't found, it must have timed out
                #TODO: we should probably do something with this...
                pass
    def _send(self, data, rpcID, address):
        """ Transmit the specified data over UDP, breaking it up into several
        packets if necessary
        If the data is spread over multiple UDP datagrams, the packets have the
        following structure::
            |           |     |      |      |        ||||||||||||   0x00   |
            |Transmision|Total number|Sequence number|  RPC ID   |Header end|
            | type ID   | of packets |of this packet |           | indicator|
            | (1 byte)  | (2 bytes)  |  (2 bytes)    |(20 bytes) | (1 byte) |
            |           |     |      |      |        ||||||||||||          |
        @note: The header used for breaking up large data segments will
               possibly be moved out of the KademliaProtocol class in the
               future, into something similar to a message translator/encoder
               class (see C{kademlia.msgformat} and C{kademlia.encoding}).
        """
        if len(data) > self.msgSizeLimit:
            # We have to spread the data over multiple UDP datagrams, and provide sequencing information
            # 1st byte is transmission type id, bytes 2 & 3 are the total number of packets in this transmission, bytes 4 & 5 are the sequence number for this specific packet
            totalPackets = len(data) / self.msgSizeLimit
            if len(data) % self.msgSizeLimit > 0:
                totalPackets += 1
            encTotalPackets = chr(totalPackets >> 8) + chr(totalPackets & 0xff)
            seqNumber = 0
            startPos = 0
            while seqNumber < totalPackets:
                #reactor.iterate() #IGNORE:E1101
                packetData = data[startPos:startPos+self.msgSizeLimit]
                encSeqNumber = chr(seqNumber >> 8) + chr(seqNumber & 0xff)
                txData = '\x00%s%s%s\x00%s' % (encTotalPackets, encSeqNumber, rpcID, packetData)
                self._sendNext(txData, address)
                startPos += self.msgSizeLimit
                seqNumber += 1
        else:
            self._sendNext(data, address)
    def _sendNext(self, txData, address):
        """ Send the next UDP packet, rate-limited via reactor.callLater.

        Packets are spaced at least minToSendDelay apart; when sends queue
        up, each subsequent packet is pushed maxToSendDelay further out.
        """
        ts = time.time()
        delay = 0
        if ts >= self._next:
            delay = self.minToSendDelay
            self._next = ts + self.minToSendDelay
        else:
            delay = (self._next-ts) + self.maxToSendDelay
            self._next += self.maxToSendDelay
        if self.transport:
            laterCall = reactor.callLater(delay, self.transport.write, txData, address)
            # Drop bookkeeping entries whose scheduled time has passed.
            for key in self._callLaterList.keys():
                if key <= ts:
                    del self._callLaterList[key]
            self._callLaterList[self._next] = laterCall
    def _sendResponse(self, contact, rpcID, response):
        """ Send a RPC response to the specified contact
        """
        msg = msgtypes.ResponseMessage(rpcID, self._node.id, response)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)
        self._send(encodedMsg, rpcID, (contact.address, contact.port))
    def _sendError(self, contact, rpcID, exceptionType, exceptionMessage):
        """ Send an RPC error message to the specified contact
        """
        msg = msgtypes.ErrorMessage(rpcID, self._node.id, exceptionType, exceptionMessage)
        msgPrimitive = self._translator.toPrimitive(msg)
        encodedMsg = self._encoder.encode(msgPrimitive)
        self._send(encodedMsg, rpcID, (contact.address, contact.port))
    def _handleRPC(self, senderContact, rpcID, method, args):
        """ Executes a local function in response to an RPC request """
        # Set up the deferred callchain
        def handleError(f):
            self._sendError(senderContact, rpcID, f.type, f.getErrorMessage())
        def handleResult(result):
            self._sendResponse(senderContact, rpcID, result)
        df = defer.Deferred()
        df.addCallback(handleResult)
        df.addErrback(handleError)
        # Execute the RPC
        # Only methods explicitly flagged with an 'rpcmethod' attribute on
        # the node are callable remotely.
        func = getattr(self._node, method, None)
        if callable(func) and hasattr(func, 'rpcmethod'):
            # Call the exposed Node method and return the result to the deferred callback chain
            try:
                ##try:
                ##    # Try to pass the sender's node id to the function...
                result = func(*args, **{'_rpcNodeID': senderContact.id, '_rpcNodeContact': senderContact})
                ##except TypeError:
                ##    # ...or simply call it if that fails
                ##    result = func(*args)
            except Exception, e:
                df.errback(failure.Failure(e))
            else:
                df.callback(result)
        else:
            # No such exposed method
            df.errback( failure.Failure( AttributeError('Invalid method: %s' % method) ) )
    def _msgTimeout(self, messageID):
        """ Called when an RPC request message times out """
        # Find the message that timed out
        if self._sentMessages.has_key(messageID):
            remoteContactID, df = self._sentMessages[messageID][0:2]
            if self._partialMessages.has_key(messageID):
                # We are still receiving this message
                # See if any progress has been made; if not, kill the message
                if self._partialMessagesProgress.has_key(messageID):
                    if len(self._partialMessagesProgress[messageID]) == len(self._partialMessages[messageID]):
                        # No progress has been made
                        del self._partialMessagesProgress[messageID]
                        del self._partialMessages[messageID]
                        df.errback(failure.Failure(TimeoutError(remoteContactID)))
                        return
                # Reset the RPC timeout timer
                timeoutCall = reactor.callLater(constants.rpcTimeout, self._msgTimeout, messageID) #IGNORE:E1101
                self._sentMessages[messageID] = (remoteContactID, df, timeoutCall)
                return
            del self._sentMessages[messageID]
            # The message's destination node is now considered to be dead;
            # raise an (asynchronous) TimeoutError exception and update the host node
            self._node.removeContact(remoteContactID)
            df.errback(failure.Failure(TimeoutError(remoteContactID)))
        else:
            # This should never be reached
            print "ERROR: deferred timed out, but is not present in sent messages list!"
    def stopProtocol(self):
        """ Called when the transport is disconnected.
        Will only be called once, after all ports are disconnected.
        """
        # Cancel any transmissions still scheduled for the future.
        for key in self._callLaterList.keys():
            try:
                if key > time.time():
                    self._callLaterList[key].cancel()
            except Exception, e:
                print e
            del self._callLaterList[key]
        #TODO: test: do we really need the reactor.iterate() call?
        reactor.iterate()
| mit |
yamt/neutron | quantum/openstack/common/timeutils.py | 39 | 5543 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import iso8601
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
    """Stringify a time in ISO 8601 format.

    Defaults to the (possibly overridden) current UTC time; appends 'Z'
    for UTC or the timezone's own name otherwise.
    """
    at = at or utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    stamp = at.strftime(fmt)
    zone = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    return stamp + ('Z' if zone == 'UTC' else zone)
def parse_isotime(timestr):
    """Parse a time string in ISO 8601 format.

    :raises ValueError: if *timestr* is malformed (iso8601.ParseError) or
        not a string at all (TypeError); both are normalized to ValueError
        so callers have a single exception to handle.
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # str(e) rather than the Python-2-only ``e.message`` attribute,
        # which no longer exists on exceptions in Python 3 and would have
        # masked the real error with an AttributeError.
        raise ValueError(str(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Format *at* (default: the current, possibly overridden, UTC time)."""
    target = at if at else utcnow()
    return target.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime (inverse of strtime)."""
    return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
    """Convert an arbitrary-timezone datetime to an equivalent naive UTC one."""
    offset = timestamp.utcoffset()
    # A naive datetime has no utcoffset and is returned untouched.
    return timestamp if offset is None else timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
    """Return True if *before* lies more than *seconds* seconds in the past."""
    if isinstance(before, basestring):
        # Accept a PERFECT_TIME_FORMAT string as well as a datetime.
        before = parse_strtime(before).replace(tzinfo=None)
    age = utcnow() - before
    return age > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if *after* lies more than *seconds* seconds in the future."""
    if isinstance(after, basestring):
        # Accept a PERFECT_TIME_FORMAT string as well as a datetime.
        after = parse_strtime(after).replace(tzinfo=None)
    lead = after - utcnow()
    return lead > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Timestamp (integer seconds since the epoch) version of utcnow()."""
    return calendar.timegm(utcnow().timetuple())
def utcnow():
    """Overridable version of utils.utcnow.

    Honours ``utcnow.override_time`` when it is set: a list yields one
    entry per call (popping from the front), while a single datetime is
    returned as-is.  When unset (or an exhausted/empty list), the real
    current UTC time is returned.
    """
    override = utcnow.override_time
    if not override:
        return datetime.datetime.utcnow()
    try:
        # A list of datetimes: consume them one at a time.
        return override.pop(0)
    except AttributeError:
        # A single datetime: return the constant value.
        return override
def iso8601_from_timestamp(timestamp):
    """Return an ISO 8601 formatted date from a POSIX timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
    """
    Override utils.utcnow to return a constant time or a list thereof,
    one at a time.

    :param override_time: a datetime, a list of datetimes, or None to
        freeze the clock at the moment of this call.  The previous
        default of ``datetime.datetime.utcnow()`` was evaluated once at
        import time, so every no-argument call froze the clock at module
        load rather than at "now"; the None sentinel restores the
        intended behaviour while keeping the signature compatible.
    """
    if override_time is None:
        override_time = datetime.datetime.utcnow()
    utcnow.override_time = override_time
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works both when ``utcnow.override_time`` is a single datetime and when
    it is a list of datetimes.
    """
    assert utcnow.override_time is not None
    try:
        # datetimes are immutable, so the old ``for dt in ...: dt += delta``
        # loop only rebound the loop variable and left the list unchanged;
        # rebuild the list instead so list overrides actually advance.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # A single datetime is not iterable; advance it directly.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by the given number of seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
    """Remove the overridden time, restoring real utcnow() behaviour."""
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe dict from a datetime, with microsecond precision.

    tzinfo is dropped; the result is intended for relative-time use only.
    """
    now = now or utcnow()
    return {'day': now.day, 'month': now.month, 'year': now.year,
            'hour': now.hour, 'minute': now.minute, 'second': now.second,
            'microsecond': now.microsecond}
def unmarshall_time(tyme):
    """Rebuild a datetime from a dict produced by marshall_now()."""
    return datetime.datetime(
        tyme['year'], tyme['month'], tyme['day'],
        tyme['hour'], tyme['minute'], tyme['second'],
        tyme['microsecond'])
def delta_seconds(before, after):
    """
    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    interval = after - before
    if hasattr(interval, 'total_seconds'):
        return interval.total_seconds()
    # total_seconds() appeared in Python 2.7; compute it by hand otherwise.
    micro_per_sec = 10 ** 6
    return (interval.days * 24 * 3600 + interval.seconds +
            interval.microseconds / float(micro_per_sec))
def is_soon(dt, window):
    """
    Determine whether a time is going to happen in the next window seconds.

    :params dt: the time to test
    :params window: minimum seconds that must remain for the time NOT to
        be considered soon
    :return: True if dt falls within the given duration from now
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
| apache-2.0 |
abhishekgahlot/youtube-dl | youtube_dl/extractor/dreisat.py | 107 | 3259 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
)
class DreiSatIE(InfoExtractor):
    """youtube-dl extractor for 3sat.de mediathek pages.

    Metadata and format URLs come from the site's beitragsDetails XML
    service, keyed by the numeric ``obj`` parameter of the page URL.
    """
    IE_NAME = '3sat'
    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
    _TESTS = [
        {
            'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
            'md5': 'be37228896d30a88f315b638900a026e',
            'info_dict': {
                'id': '45918',
                'ext': 'mp4',
                'title': 'Waidmannsheil',
                'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
                'uploader': '3sat',
                'upload_date': '20140913'
            }
        },
        {
            'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        """Fetch the details XML for the video and build the info dict."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
        details_doc = self._download_xml(details_url, video_id, 'Downloading video details')
        # A non-'ok' status code means the video is gone or the service
        # rejected the request; surface it as an expected extractor error.
        status_code = details_doc.find('./status/statuscode')
        if status_code is not None and status_code.text != 'ok':
            code = status_code.text
            if code == 'notVisibleAnymore':
                message = 'Video %s is not available' % video_id
            else:
                message = '%s returned error: %s' % (self.IE_NAME, code)
            raise ExtractorError(message, expected=True)
        # Thumbnail sizes are encoded in the 'key' attribute as "WIDTHxHEIGHT".
        thumbnail_els = details_doc.findall('.//teaserimage')
        thumbnails = [{
            'width': int(te.attrib['key'].partition('x')[0]),
            'height': int(te.attrib['key'].partition('x')[2]),
            'url': te.text,
        } for te in thumbnail_els]
        information_el = details_doc.find('.//information')
        video_title = information_el.find('./title').text
        video_description = information_el.find('./detail').text
        details_el = details_doc.find('.//details')
        video_uploader = details_el.find('./channel').text
        upload_date = unified_strdate(details_el.find('./airtime').text)
        # 'formitaet' elements describe the downloadable renditions; the
        # metafilegenerator.de entries are playlist stubs, not media, and
        # are filtered out.
        format_els = details_doc.findall('.//formitaet')
        formats = [{
            'format_id': fe.attrib['basetype'],
            'width': int(fe.find('./width').text),
            'height': int(fe.find('./height').text),
            'url': fe.find('./url').text,
            'filesize': int(fe.find('./filesize').text),
            'video_bitrate': int(fe.find('./videoBitrate').text),
        } for fe in format_els
            if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
        self._sort_formats(formats)
        return {
            '_type': 'video',
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'description': video_description,
            'thumbnails': thumbnails,
            'thumbnail': thumbnails[-1]['url'],
            'uploader': video_uploader,
            'upload_date': upload_date,
        }
| unlicense |
lmazuel/azure-sdk-for-python | azure-mgmt-scheduler/tests/test_scheduler_patch.py | 3 | 1458 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from datetime import timedelta
from azure.mgmt.scheduler import patch
# Retry policy
# 20 seconds => "retryType": "fixed", "retryInterval": "00:00:20",
# 20 minutes => "retryType": "fixed", "retryInterval": "00:20:00",
# 1 hours => "retryType": "fixed", "retryInterval": "01:00:00",
# 2 days => "retryType": "fixed", "retryInterval": "60.00:00:00",
# default => "retryType": "fixed"
# none => "retryType": "none"
def test_iso_to_timespan():
    """.NET TimeSpan string -> ISO 8601 duration conversions.

    NOTE(review): "60.00:00:00" mapping to "P2DT0H0M0S" implies a 30:1
    day scaling inside the patch module (presumably Scheduler's 30-day
    month convention) — confirm against azure.mgmt.scheduler.patch.
    """
    assert patch.from_timespan_to_iso8601("00:00:20") == "P0DT0H0M20S"
    assert patch.from_timespan_to_iso8601("00:20:00") == "P0DT0H20M0S"
    assert patch.from_timespan_to_iso8601("01:00:00") == "P0DT1H0M0S"
    assert patch.from_timespan_to_iso8601("60.00:00:00") == "P2DT0H0M0S"
def test_timestamp_toiso():
    """Spot-check timedelta / ISO-8601 -> .NET-timespan conversion.

    NOTE(review): timedelta(days=1) is expected to yield "30.00:00:00"
    (30 days), and the function is also fed a raw ISO string ("PT1S");
    confirm both behaviours against patch.from_iso8601_to_timespan.
    """
    assert patch.from_iso8601_to_timespan(timedelta(days=1)) == "30.00:00:00"
    assert patch.from_iso8601_to_timespan(timedelta(hours=1)) == "01:00:00"
    assert patch.from_iso8601_to_timespan(timedelta(minutes=1)) == "00:01:00"
    assert patch.from_iso8601_to_timespan(timedelta(seconds=1)) == "00:00:01"
    assert patch.from_iso8601_to_timespan("PT1S") == "00:00:01"
| mit |
Solinea/horizon | openstack_dashboard/dashboards/router/nexus1000v/forms.py | 35 | 10255 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
# Module-level logger for the nexus1000v forms module.
LOG = logging.getLogger(__name__)
def get_tenant_choices(request):
    """Build the project selector choice list.

    Returns a placeholder entry followed by one ``(id, name)`` pair per
    enabled tenant; on Keystone failure, reports the error to the user
    and returns just the placeholder.
    """
    choices = [('', _("Select a project"))]
    projects = []
    try:
        projects, has_more = api.keystone.tenant_list(request)
    except Exception:
        exceptions.handle(request, _('Projects could not be retrieved.'))
    choices.extend((project.id, project.name)
                   for project in projects
                   if project.enabled)
    return choices
class CreateNetworkProfile(forms.SelfHandlingForm):
    """Create Network Profile form.

    Collects the parameters for creating a Cisco Nexus1000v network
    profile: segment type (VLAN / Overlay / Trunk), a segment-type
    dependent sub type, segment and multicast ranges, physical network
    and the owning project.  The ``switchable``/``switched`` CSS classes
    and ``data-*`` widget attributes drive Horizon's client-side
    show/hide switching of the conditional fields.
    """

    name = forms.CharField(max_length=255,
                           label=_("Name"))
    segment_type = forms.ChoiceField(label=_('Segment Type'),
                                     choices=[('vlan', _('VLAN')),
                                              ('overlay', _('Overlay')),
                                              ('trunk', _('Trunk'))],
                                     widget=forms.Select
                                     (attrs={'class': 'switchable',
                                             'data-slug': 'segtype'}))
    # Sub type options available for Overlay segment type
    sub_type = forms.ChoiceField(label=_('Sub Type'),
                                 choices=[('native_vxlan', _('Native VXLAN')),
                                          ('enhanced', _('Enhanced VXLAN')),
                                          ('other', _('Other'))],
                                 required=False,
                                 widget=forms.Select
                                 (attrs={'class': 'switchable switched',
                                         'data-slug': 'subtype',
                                         'data-switch-on': 'segtype',
                                         'data-segtype-overlay':
                                             _("Sub Type")}))
    # Sub type options available for Trunk segment type
    sub_type_trunk = forms.ChoiceField(label=_('Sub Type'),
                                       choices=[('vlan', _('VLAN'))],
                                       required=False,
                                       widget=forms.Select
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-trunk':
                                                   _("Sub Type")}))
    # Shown for VLAN and Overlay profiles only.
    segment_range = forms.CharField(max_length=255,
                                    label=_("Segment Range"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on': 'segtype',
                                            'data-segtype-vlan':
                                                _("Segment Range"),
                                            'data-segtype-overlay':
                                                _("Segment Range")}),
                                    help_text=_("1-4093 for VLAN; "
                                                "5000 and above for Overlay"))
    # Only meaningful for the Native VXLAN sub type.
    multicast_ip_range = forms.CharField(max_length=30,
                                         label=_("Multicast IP Range"),
                                         required=False,
                                         widget=forms.TextInput
                                         (attrs={'class': 'switched',
                                                 'data-switch-on':
                                                     'subtype',
                                                 'data-subtype-native_vxlan':
                                                     _("Multicast IP Range")}),
                                         help_text=_("Multicast IPv4 range"
                                                     "(e.g. 224.0.1.0-"
                                                     "224.0.1.100)"))
    # Free-form sub type, used when sub_type == 'other'.
    other_subtype = forms.CharField(max_length=255,
                                    label=_("Sub Type Value (Manual Input)"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on':
                                                'subtype',
                                            'data-subtype-other':
                                                _("Sub Type Value "
                                                  "(Manual Input)")}),
                                    help_text=_("Enter parameter (e.g. GRE)"))
    physical_network = forms.CharField(max_length=255,
                                       label=_("Physical Network"),
                                       required=False,
                                       widget=forms.TextInput
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-vlan':
                                                   _("Physical Network")}))
    project = forms.ChoiceField(label=_("Project"),
                                required=False)

    def __init__(self, request, *args, **kwargs):
        # Populate the project selector with the tenants visible to the
        # requesting user.
        super(CreateNetworkProfile, self).__init__(request, *args, **kwargs)
        self.fields['project'].choices = get_tenant_choices(request)

    def clean(self):
        # If sub_type is 'other' or 'trunk' then
        # assign this new value for sub_type
        cleaned_data = super(CreateNetworkProfile, self).clean()
        segment_type = cleaned_data.get('segment_type')
        if segment_type == 'overlay':
            # 'other' means the user typed the sub type manually; fold the
            # manual value back into the canonical 'sub_type' key.
            sub_type = cleaned_data.get('sub_type')
            if sub_type == 'other':
                other_subtype = cleaned_data.get('other_subtype')
                cleaned_data['sub_type'] = other_subtype
                LOG.debug('subtype is now %(params)s',
                          {'params': other_subtype})
        elif segment_type == 'trunk':
            # Trunk profiles take their sub type from the dedicated field.
            sub_type_trunk = cleaned_data.get('sub_type_trunk')
            cleaned_data['sub_type'] = sub_type_trunk
            LOG.debug('subtype is now %(params)s',
                      {'params': sub_type_trunk})
        return cleaned_data

    def handle(self, request, data):
        # Create the profile via the Neutron API; on failure report the
        # error and redirect back to the nexus1000v index view.
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            params = {'name': data['name'],
                      'segment_type': data['segment_type'],
                      'sub_type': data['sub_type'],
                      'segment_range': data['segment_range'],
                      'physical_network': data['physical_network'],
                      'multicast_ip_range': data['multicast_ip_range'],
                      'tenant_id': data['project']}
            profile = api.neutron.profile_create(request,
                                                 **params)
            msg = _('Network Profile %s '
                    'was successfully created.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            redirect = reverse('horizon:router:nexus1000v:index')
            msg = _('Failed to create network profile %s') % data['name']
            exceptions.handle(request, msg, redirect=redirect)
class UpdateNetworkProfile(CreateNetworkProfile):
    """Update Network Profile form.

    Reuses the creation form's fields but renders everything except name,
    segment range and multicast range read-only -- those are the only
    attributes passed to the Neutron update call below.
    """

    # Hidden identifier of the profile being edited.
    profile_id = forms.CharField(label=_("ID"),
                                 widget=forms.HiddenInput())
    # The project cannot be changed after creation, so a plain text field
    # overrides the parent's ChoiceField.
    project = forms.CharField(label=_("Project"), required=False)

    def __init__(self, request, *args, **kwargs):
        super(UpdateNetworkProfile, self).__init__(request, *args, **kwargs)
        # Immutable attributes are shown read-only rather than hidden.
        self.fields['segment_type'].widget.attrs['readonly'] = 'readonly'
        self.fields['sub_type'].widget.attrs['readonly'] = 'readonly'
        self.fields['sub_type_trunk'].widget.attrs['readonly'] = 'readonly'
        self.fields['other_subtype'].widget.attrs['readonly'] = 'readonly'
        self.fields['physical_network'].widget.attrs['readonly'] = 'readonly'
        self.fields['project'].widget.attrs['readonly'] = 'readonly'

    def handle(self, request, data):
        # Update only the mutable attributes of the profile.
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            params = {'name': data['name'],
                      'segment_range': data['segment_range'],
                      'multicast_ip_range': data['multicast_ip_range']}
            profile = api.neutron.profile_update(
                request,
                data['profile_id'],
                **params
            )
            msg = _('Network Profile %s '
                    'was successfully updated.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            msg = _('Failed to update '
                    'network profile (%s).') % data['name']
            redirect = reverse('horizon:router:nexus1000v:index')
            exceptions.handle(request, msg, redirect=redirect)
            return False
| apache-2.0 |
microcom/microcom-runbot | runbot/res_config.py | 1 | 2935 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class runbot_config_settings(osv.osv_memory):
    """Runbot settings wizard.

    Transient (osv_memory) model backing the runbot configuration screen;
    it loads and stores the ``runbot.*`` keys of ``ir.config_parameter``.
    """
    _name = 'runbot.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'default_workers': fields.integer('Total Number of Workers'),
        'default_running_max': fields.integer('Maximum Number of Running Builds'),
        'default_timeout': fields.integer('Default Timeout (in seconds)'),
        'default_starting_port': fields.integer('Starting Port for Running Builds'),
        'default_domain': fields.char('Runbot Domain'),
    }

    def get_default_parameters(self, cr, uid, fields, context=None):
        """Load current runbot.* parameters (with fallbacks) into the form."""
        icp = self.pool['ir.config_parameter']
        # ir.config_parameter stores values as strings, hence the int()
        # conversions in the returned mapping below.
        workers = icp.get_param(cr, uid, 'runbot.workers', default=6)
        running_max = icp.get_param(cr, uid, 'runbot.running_max', default=75)
        timeout = icp.get_param(cr, uid, 'runbot.timeout', default=1800)
        starting_port = icp.get_param(cr, uid, 'runbot.starting_port', default=2000)
        runbot_domain = icp.get_param(cr, uid, 'runbot.domain', default='runbot.odoo.com')
        return {
            'default_workers': int(workers),
            'default_running_max': int(running_max),
            'default_timeout': int(timeout),
            'default_starting_port': int(starting_port),
            'default_domain': runbot_domain,
        }

    def set_default_parameters(self, cr, uid, ids, context=None):
        """Persist the wizard values back into ir.config_parameter."""
        config = self.browse(cr, uid, ids[0], context)
        icp = self.pool['ir.config_parameter']
        icp.set_param(cr, uid, 'runbot.workers', config.default_workers)
        icp.set_param(cr, uid, 'runbot.running_max', config.default_running_max)
        icp.set_param(cr, uid, 'runbot.timeout', config.default_timeout)
        icp.set_param(cr, uid, 'runbot.starting_port', config.default_starting_port)
        icp.set_param(cr, uid, 'runbot.domain', config.default_domain)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nin042/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cmake.py | 123 | 7236 | # Copyright (C) 2012 Intel Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in cmake files.(.cmake, CMakeLists.txt)"""
import re
from common import TabChecker
class CMakeChecker(object):
    """Processes CMake lines for checking style.

    Reports violations through the ``handle_style_error`` callback with
    the signature (line_number, category, confidence, message).  This is
    Python 2 code (note the ``xrange`` below), as used by webkitpy.
    """

    # NO_SPACE_CMDS list are based on commands section of CMake document.
    # Now it is generated from
    # http://www.cmake.org/cmake/help/v2.8.10/cmake.html#section_Commands.
    # Some commands are from default CMake modules such as pkg_check_modules.
    # Please keep list in alphabet order.
    #
    # For commands in this list, spaces should not be added it and its
    # parentheses. For eg, message("testing"), not message ("testing")
    #
    # The conditional commands like if, else, endif, foreach, endforeach,
    # while, endwhile and break are listed in ONE_SPACE_CMDS
    NO_SPACE_CMDS = [
        'add_custom_command', 'add_custom_target', 'add_definitions',
        'add_dependencies', 'add_executable', 'add_library',
        'add_subdirectory', 'add_test', 'aux_source_directory',
        'build_command',
        'cmake_minimum_required', 'cmake_policy', 'configure_file',
        'create_test_sourcelist',
        'define_property',
        'enable_language', 'enable_testing', 'endfunction', 'endmacro',
        'execute_process', 'export',
        'file', 'find_file', 'find_library', 'find_package', 'find_path',
        'find_program', 'fltk_wrap_ui', 'function',
        'get_cmake_property', 'get_directory_property',
        'get_filename_component', 'get_property', 'get_source_file_property',
        'get_target_property', 'get_test_property',
        'include', 'include_directories', 'include_external_msproject',
        'include_regular_expression', 'install',
        'link_directories', 'list', 'load_cache', 'load_command',
        'macro', 'mark_as_advanced', 'math', 'message',
        'option',
        #From FindPkgConfig.cmake
        'pkg_check_modules',
        'project',
        'qt_wrap_cpp', 'qt_wrap_ui',
        'remove_definitions', 'return',
        'separate_arguments', 'set', 'set_directory_properties', 'set_property',
        'set_source_files_properties', 'set_target_properties',
        'set_tests_properties', 'site_name', 'source_group', 'string',
        'target_link_libraries', 'try_compile', 'try_run',
        'unset',
        'variable_watch',
    ]

    # CMake conditional commands, require one space between command and
    # its parentheses, such as "if (", "foreach (", etc.
    ONE_SPACE_CMDS = [
        'if', 'else', 'elseif', 'endif',
        'foreach', 'endforeach',
        'while', 'endwhile',
        'break',
    ]

    def __init__(self, file_path, handle_style_error):
        self._handle_style_error = handle_style_error
        # Tab checking is delegated to the shared TabChecker helper.
        self._tab_checker = TabChecker(file_path, handle_style_error)

    def check(self, lines):
        """Run all style checks over the given list of lines."""
        self._tab_checker.check(lines)
        self._num_lines = len(lines)
        # Line numbers reported to the callback are 1-based, hence l + 1.
        for l in xrange(self._num_lines):
            self._process_line(l + 1, lines[l])

    def _process_line(self, line_number, line_content):
        """Apply all per-line checks to one (possibly tab-containing) line."""
        if re.match('(^|\ +)#', line_content):
            # ignore comment line
            return
        l = line_content.expandtabs(4)
        # check command like message( "testing")
        if re.search('\(\ +', l):
            self._handle_style_error(line_number, 'whitespace/parentheses', 5,
                                     'No space after "("')
        # check command like message("testing" )
        # (a line consisting solely of a closing paren is allowed)
        if re.search('\ +\)', l) and not re.search('^\ +\)$', l):
            self._handle_style_error(line_number, 'whitespace/parentheses', 5,
                                     'No space before ")"')
        self._check_trailing_whitespace(line_number, l)
        self._check_no_space_cmds(line_number, l)
        self._check_one_space_cmds(line_number, l)
        self._check_indent(line_number, line_content)

    def _check_trailing_whitespace(self, line_number, line_content):
        """Flag lines that end in whitespace (after stripping EOL chars)."""
        line_content = line_content.rstrip('\n')    # chr(10), newline
        line_content = line_content.rstrip('\r')    # chr(13), carriage return
        line_content = line_content.rstrip('\x0c')  # chr(12), form feed, ^L
        stripped = line_content.rstrip()
        if line_content != stripped:
            self._handle_style_error(line_number, 'whitespace/trailing', 5,
                                     'No trailing spaces')

    def _check_no_space_cmds(self, line_number, line_content):
        # check command like "SET (" or "Set("
        for t in self.NO_SPACE_CMDS:
            self._check_non_lowercase_cmd(line_number, line_content, t)
            if re.search('(^|\ +)' + t.lower() + '\ +\(', line_content):
                msg = 'No space between command "' + t.lower() + '" and its parentheses, should be "' + t + '("'
                self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)

    def _check_one_space_cmds(self, line_number, line_content):
        # check command like "IF (" or "if(" or "if (" or "If ()"
        for t in self.ONE_SPACE_CMDS:
            self._check_non_lowercase_cmd(line_number, line_content, t)
            # Flag both "if(" (no space) and "if   (" (more than one space).
            if re.search('(^|\ +)' + t.lower() + '(\(|\ \ +\()', line_content):
                msg = 'One space between command "' + t.lower() + '" and its parentheses, should be "' + t + ' ("'
                self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)

    def _check_non_lowercase_cmd(self, line_number, line_content, cmd):
        """Flag a command written in anything other than all-lowercase."""
        if re.search('(^|\ +)' + cmd + '\ *\(', line_content, flags=re.IGNORECASE) and \
            (not re.search('(^|\ +)' + cmd.lower() + '\ *\(', line_content)):
            msg = 'Use lowercase command "' + cmd.lower() + '"'
            self._handle_style_error(line_number, 'command/lowercase', 5, msg)

    def _check_indent(self, line_number, line_content):
        #TODO (halton): add indent checking
        pass
| bsd-3-clause |
cvmfs/cvmfs | cvmfs/webapi/test_cvmfs_geo.py | 2 | 5214 | from __future__ import print_function
import unittest
import socket
import cvmfs_geo
from cvmfs_geo import distance_on_unit_sphere
from cvmfs_geo import addr_geoinfo
from cvmfs_geo import name_geoinfo
from cvmfs_geo import geosort_servers
###
# Simulate a small geo IP database, since we can't always
# expect a full one to be available. IPv4 addresses are
# always preferred, so for those with IPv6 use only IPv6.
def getaddrs(name, type):
    """Resolve *name* and return the addresses whose family matches *type*
    (e.g. socket.AF_INET or socket.AF_INET6)."""
    infos = socket.getaddrinfo(name, 80, 0, 0, socket.IPPROTO_TCP)
    # Each info tuple is (family, socktype, proto, canonname, sockaddr);
    # the address itself is the first element of sockaddr.
    return [info[4][0] for info in infos if info[0] == type]
# Canned coordinates and resolved addresses for four real stratum-one
# servers; these module-level fixtures feed the simulated GeoIP database
# below.  Note the lookups perform live DNS resolution at import time.
CERNgeo = {
    'latitude': 46.2324,
    'longitude': 6.0502
}
CERNname = 'cvmfs-stratum-one.cern.ch'
CERNaddrs = getaddrs(CERNname, socket.AF_INET6)
if len(CERNaddrs) == 0: # fallback on IPv4-only systems
    CERNaddrs = getaddrs(CERNname, socket.AF_INET)
FNALgeo = {
    'latitude': 41.7768,
    'longitude': -88.4604
}
FNALname = 'cvmfs.fnal.gov'
FNALaddrs = getaddrs(FNALname, socket.AF_INET)
IHEPgeo = {
    'latitude': 39.9289,
    'longitude': 116.3883
}
IHEPname = 'cvmfs-stratum-one.ihep.ac.cn'
IHEPaddrs = getaddrs(IHEPname, socket.AF_INET)
RALgeo = {
    'latitude': 51.75,
    'longitude': -1.25
}
RALname = 'cernvmfs.gridpp.rl.ac.uk'
RALaddrs = getaddrs(RALname, socket.AF_INET6)
if len(RALaddrs) == 0: # fallback on IPv4-only systems
    RALaddrs = getaddrs(RALname, socket.AF_INET)
class giTestDb():
    """Stand-in for the real GeoIP reader: maps the known stratum-one
    addresses resolved above to their canned location records."""

    def get(self, addr):
        """Return {'location': geo} for a known address, else None."""
        lookup_table = ((FNALaddrs, FNALgeo),
                        (IHEPaddrs, IHEPgeo),
                        (CERNaddrs, CERNgeo),
                        (RALaddrs, RALgeo))
        for known_addrs, geo in lookup_table:
            if addr in known_addrs:
                return {'location': geo}
        return None
# Install the simulated database in place of the real GeoIP reader so the
# tests below never depend on an actual GeoIP database file.
cvmfs_geo.gireader = giTestDb()
####
class GeoTest(unittest.TestCase):
    """Exercises cvmfs_geo's distance, lookup, caching and sorting helpers
    against the simulated GeoIP database installed above."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test1Distance(self):
        # Expected values are great-circle distances in radians on the
        # unit sphere; the function must be symmetric in its arguments.
        self.assertEqual(0.0, distance_on_unit_sphere(0, 0, 0, 0))
        self.assertAlmostEqual(1.11458455,
                distance_on_unit_sphere(FNALgeo['latitude'], FNALgeo['longitude'],
                                        CERNgeo['latitude'], CERNgeo['longitude']))
        self.assertAlmostEqual(1.11458455,
                distance_on_unit_sphere(CERNgeo['latitude'], CERNgeo['longitude'],
                                        FNALgeo['latitude'], FNALgeo['longitude']))
        self.assertAlmostEqual(1.6622382,
                distance_on_unit_sphere(IHEPgeo['latitude'], IHEPgeo['longitude'],
                                        FNALgeo['latitude'], FNALgeo['longitude']))
        self.assertAlmostEqual(0.1274021,
                distance_on_unit_sphere(CERNgeo['latitude'], CERNgeo['longitude'],
                                        RALgeo['latitude'], RALgeo['longitude']))
        self.assertAlmostEqual(1.2830254,
                distance_on_unit_sphere(IHEPgeo['latitude'], IHEPgeo['longitude'],
                                        RALgeo['latitude'], RALgeo['longitude']))
        # surprisingly, CERN is slightly further from IHEP than RAL
        self.assertAlmostEqual(1.2878979,
                distance_on_unit_sphere(IHEPgeo['latitude'], IHEPgeo['longitude'],
                                        CERNgeo['latitude'], CERNgeo['longitude']))

    def test2AddrGeoinfo(self):
        # Address-based lookup goes straight to the (simulated) database.
        now = 0
        self.assertEqual(CERNgeo, addr_geoinfo(now, CERNaddrs[0]))
        self.assertEqual(FNALgeo, addr_geoinfo(now, FNALaddrs[0]))
        self.assertEqual(IHEPgeo, addr_geoinfo(now, IHEPaddrs[0]))
        self.assertEqual(RALgeo, addr_geoinfo(now, RALaddrs[0]))

    def test3NameGeoinfo(self):
        # Name-based lookups populate cvmfs_geo.geo_cache (one entry per
        # name).
        self.assertEqual(0, len(cvmfs_geo.geo_cache))
        now = 0
        self.assertEqual(CERNgeo, name_geoinfo(now, CERNname))
        self.assertEqual(FNALgeo, name_geoinfo(now, FNALname))
        self.assertEqual(IHEPgeo, name_geoinfo(now, IHEPname))
        self.assertEqual(RALgeo, name_geoinfo(now, RALname))
        self.assertEqual(4, len(cvmfs_geo.geo_cache))
        # test the caching, when there's no database available
        savegireader = cvmfs_geo.gireader
        cvmfs_geo.gireader = None
        now = 1
        self.assertEqual(CERNgeo, name_geoinfo(now, CERNname))
        self.assertEqual(FNALgeo, name_geoinfo(now, FNALname))
        self.assertEqual(IHEPgeo, name_geoinfo(now, IHEPname))
        self.assertEqual(RALgeo, name_geoinfo(now, RALname))
        cvmfs_geo.gireader = savegireader

    def test4GeosortServers(self):
        # Result is [ok_flag, permutation]: indices of the given servers
        # ordered from nearest to farthest of the reference location.
        self.assertEqual([True, [3, 0, 1, 2]],
            geosort_servers(0, RALgeo, [CERNname, FNALname, IHEPname, RALname]))
        self.assertEqual([True, [0, 3, 2, 1]],
            geosort_servers(0, RALgeo, [RALname, IHEPname, FNALname, CERNname]))
        self.assertEqual([True, [1, 0, 3, 2]],
            geosort_servers(0, IHEPgeo, [RALname, IHEPname, FNALname, CERNname]))
        self.assertEqual([True, [2, 3, 0, 1]],
            geosort_servers(0, CERNgeo, [FNALname, IHEPname, CERNname, RALname]))
        self.assertEqual([True, [3, 2, 1, 0]],
            geosort_servers(0, FNALgeo, [IHEPname, CERNname, RALname, FNALname]))
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
rohanp/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 23 | 45330 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
# Shared fixtures for the whole test module.  The RNG consumption order
# below is deliberate -- iris and digits permutations both draw from the
# same seeded generator.
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Sparse container types and parameter grids used by parameterized tests.
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
# Minkowski p values exercised by the distance-metric tests.
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    # Test unsupervised neighbors methods
    # All algorithms must agree with each other for every Minkowski p,
    # and the return_distance=False result must equal the indices of the
    # return_distance=True result.
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results_nodist = []
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            results_nodist.append(neigh.kneighbors(test,
                                                   return_distance=False))
            results.append(neigh.kneighbors(test, return_distance=True))
        # Compare each algorithm's output with the next one's.
        for i in range(len(results) - 1):
            assert_array_almost_equal(results_nodist[i], results[i][1])
            assert_array_almost_equal(results[i][0], results[i + 1][0])
            assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
    # test the types of valid input into NearestNeighbors
    # Fitting on a fitted estimator, a BallTree or a KDTree must all give
    # the same neighbors as fitting on the raw data.
    X = rng.random_sample((10, 3))
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)
    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(input)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
    """Tests unsupervised NearestNeighbors with a distance matrix.

    metric='precomputed' (with both 'brute' and 'auto' algorithms) must
    give the same neighbors as computing distances from the raw feature
    matrix, for queries, for X=None self-queries, and for the estimator
    classes' predictions.
    """
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    DXX = metrics.pairwise_distances(X, metric='euclidean')
    DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
    for method in ['kneighbors']:
        # TODO: also test radius_neighbors, but requires different assertion

        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)

        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)

        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)

        # Check X=None in prediction
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)

        # Must raise a ValueError if the matrix is not of correct shape
        assert_raises(ValueError, getattr(nbrs_D, method), X)

    # Each estimator class must predict identically from features and
    # from the precomputed distance matrix.  (A stray debug print of Est
    # was removed here.)
    target = np.arange(X.shape[0])
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        est = Est(metric='euclidean')
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = 'precomputed'
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
    # Ensure array is split correctly
    # Cross-validation scores with metric='precomputed' (splitting the
    # square distance matrix) must match scores computed from features.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    D = pairwise_distances(X, metric='euclidean')
    y = rng.randint(3, size=20)
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        metric_score = cross_val_score(Est(), X, y)
        precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
        assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    # Test unsupervised radius-based query
    # All algorithms must return the same neighbor sets within the radius
    # for every Minkowski p.
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0]))),
            # NOTE(review): the trailing comma above turns the statement
            # into a one-element tuple; harmless, but presumably
            # unintended.
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    # Test k-neighbors classification
    # Points perturbed by a tiny epsilon must keep their original label,
    # for every algorithm/weighting combination and for both integer and
    # string targets.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Label = 1 inside the sphere of squared radius 0.5, else 0.
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Test prediction with y_str
            knn.fit(X, y_str)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    # Test k-neighbors classification
    # Float-typed class labels must be handled like their integer
    # counterparts.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X, y.astype(np.float))
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    y_pred = knn.predict(X[:n_test_pts] + epsilon)
    assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    # Test KNeighborsClassifier.predict_proba() method
    # Probabilities are neighbor-vote fractions; columns follow the
    # sorted class labels (1, 4, 5).
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    # Test radius-based classification
    # Analogue of test_kneighbors_classifier for the radius-based
    # classifier: tiny perturbations must not change predicted labels.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                        weights=weights,
                                                        algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # String targets must behave the same as integer targets.
            neigh.fit(X, y_str)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    """Radius-based classification of queries with no in-radius neighbors.

    When a query point has no neighbors within ``radius``:
    * with ``outlier_label=None``, prediction must raise ValueError;
    * with an explicit ``outlier_label``, the point must be assigned
      that label.
    """
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1

    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier

    weight_func = _weight_func

    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                # In-radius queries always get their nearest labels.
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    # This branch was previously dead code (`elif False:`);
                    # the outlier query must receive the configured label.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    # Test radius-based classifier when no neighbors found and outliers
    # are labeled.
    # The second point of z2 has no neighbors within the radius and must
    # get the configured outlier_label (-1).
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])  # one outlier
    correct_labels1 = np.array([1, 2])
    correct_labels2 = np.array([1, -1])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
            assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    # Test radius-based classifier, when distance to a sample is zero.
    # A query coinciding exactly with a training point must still predict
    # that point's label for every weighting (including 1/d weights).
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
    correct_labels1 = np.array([1, 2])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
    # Test radius-based regressor, when distance to a sample is zero.
    # The first two training points are identical, so a query at
    # (1.1, 1.1) averages both targets (1.0 and 1.5) -> 1.25; a query
    # sitting exactly on (2.0, 2.0) has a zero distance to handle.
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    radius = 0.2
    z = np.array([[1.1, 1.1], [2.0, 2.0]])

    rnn_correct_labels = np.array([1.25, 2.0])

    knn_correct_unif = np.array([1.25, 1.0])
    knn_correct_dist = np.array([1.25, 2.0])

    for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since user will be expected
        # to handle zero distances themselves in the function.
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))

        for weights, corr_labels in zip(['uniform', 'distance'],
                                        [knn_correct_unif, knn_correct_dist]):
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    # Point at 3.0 lies exactly on the radius boundary (included);
    # 3.01 lies just outside (excluded).
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0

    for algorithm in ALGORITHMS:
        nbrs = neighbors.NearestNeighbors(radius=radius,
                                          algorithm=algorithm).fit(X)
        results = nbrs.radius_neighbors([[0.0]], return_distance=False)
        assert_equal(results.shape, (1,))
        assert_equal(results.dtype, object)
        assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test radius-based classifier on multioutput data: the multioutput
    # prediction must equal stacking per-output single-target fits.
    rng = check_random_state(0)
    n_features = 2
    n_samples = 40
    n_output = 3

    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    weights = [None, 'uniform', 'distance', _weight_func]

    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        for o in range(n_output):
            rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            rnn.fit(X_train, y_train[:, o])
            y_pred_so.append(rnn.predict(X_test))

        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)

        # Multioutput prediction
        rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        rnn_mo.fit(X_train, y_train)
        y_pred_mo = rnn_mo.predict(X_test)

        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    # Test k-NN classifier on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int yields the same platform-default integer dtype.
    y = ((X ** 2).sum(axis=1) < .5).astype(int)

    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        knn.fit(sparsemat(X), y)
        # Perturb the queries slightly so they are near, not equal to,
        # training points.
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        for sparsev in SPARSE_TYPES + (np.asarray,):
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            y_pred = knn.predict(X_eps)
            assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    # Test k-NN classifier on multioutput data: multioutput predictions
    # and probabilities must agree with per-output single-target fits.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 50
    n_output = 3

    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    weights = [None, 'uniform', 'distance', _weight_func]

    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        y_pred_proba_so = []
        for o in range(n_output):
            knn = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X_train, y_train[:, o])
            y_pred_so.append(knn.predict(X_test))
            y_pred_proba_so.append(knn.predict_proba(X_test))

        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        assert_equal(len(y_pred_proba_so), n_output)

        # Multioutput prediction
        knn_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        knn_mo.fit(X_train, y_train)
        y_pred_mo = knn_mo.predict(X_test)

        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)

        # Check proba
        y_pred_proba_mo = knn_mo.predict_proba(X_test)
        assert_equal(len(y_pred_proba_mo), n_output)

        for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
            assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    # Test k-neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Target is the Euclidean norm of each sample, normalized to [0, 1].
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()

    y_target = y[:n_test_pts]

    weight_func = _weight_func

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            # Queries are slight perturbations of training points, so
            # predictions should be close but only loosely bounded.
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    # Test k-neighbors in multi-output regression with uniform weight:
    # the prediction must equal the plain mean of the neighbors' targets.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        knn = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        knn.fit(X_train, y_train)

        neigh_idx = knn.kneighbors(X_test, return_distance=False)
        # Manually average each query's neighbor targets for comparison.
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred = knn.predict(X_test)

        assert_equal(y_pred.shape, y_test.shape)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    # Test k-neighbors in multi-output regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    # Duplicate the single target into two identical output columns.
    y = np.vstack([y, y]).T

    y_target = y[:n_test_pts]

    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        knn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = knn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)

        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    # Test radius-based neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()

    y_target = y[:n_test_pts]

    weight_func = _weight_func

    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                       weights=weights,
                                                       algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            # Averaging over a ball of the given radius can deviate from
            # the point target by at most ~radius/2 for this smooth target.
            assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    # Test radius neighbors in multi-output regression (uniform weight):
    # the prediction must equal a manual mean of the in-radius targets.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        # FIX: removed the stray space in `neighbors. RadiusNeighborsRegressor`.
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)

        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        # np.array(...) already materializes the per-query means; the
        # original wrapped the result in a second, redundant np.array call.
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])

        y_pred = rnn.predict(X_test)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    # Test radius-neighbors in multi-output regression with various weights.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T

    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]

    for algorithm, weights in product(ALGORITHMS, weights):
        # NOTE(review): n_neighbors is passed to a *radius*-based regressor;
        # presumably accepted by the shared base class and unused here —
        # confirm against the estimator's signature.
        rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = rnn.predict(X[:n_test_pts] + epsilon)

        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    # Test radius-based regression on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int yields the same platform-default integer dtype.
    y = ((X ** 2).sum(axis=1) < .25).astype(int)

    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            algorithm='auto')
        knn.fit(sparsemat(X), y)
        for sparsev in SPARSE_OR_DENSE:
            X2 = sparsev(X)
            assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.

    for algorithm in ALGORITHMS:
        # A 1-NN classifier must reproduce the training labels exactly
        # when queried with the training data itself.
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_array_equal(clf.predict(iris.data), iris.target)

        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)

        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
                    > 0.95)
def test_neighbors_digits():
    # Sanity check on the digits dataset
    # the 'brute' algorithm has been observed to fail if the input
    # dtype is uint8 due to overflow in distance calculations.

    X = digits.data.astype('uint8')
    Y = digits.target
    (n_samples, n_features) = X.shape
    # 80/20 contiguous train/test split.
    train_test_boundary = int(n_samples * 0.8)
    train = np.arange(0, train_test_boundary)
    test = np.arange(train_test_boundary, n_samples)
    (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]

    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    # Scores must match regardless of whether the input is uint8 or float.
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # n_neighbors = 1: with include_self=True each point's nearest
    # neighbor is itself, giving the identity connectivity matrix.
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True)
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))

    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])

    # n_neighbors = 2
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True)
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])

    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])

    # n_neighbors = 3: every point is connected to every point.
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True)
    assert_array_almost_equal(
        A.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    """Dense and CSR inputs must yield identical k-NN graphs for every
    neighbor count and mode."""
    rng = np.random.RandomState(seed)
    X_dense = rng.randn(10, 10)
    X_sparse = csr_matrix(X_dense)

    for n_neighbors, mode in product([1, 2, 3],
                                     ["connectivity", "distance"]):
        graph_from_dense = neighbors.kneighbors_graph(
            X_dense, n_neighbors, mode=mode).toarray()
        graph_from_sparse = neighbors.kneighbors_graph(
            X_sparse, n_neighbors, mode=mode).toarray()
        assert_array_almost_equal(graph_from_dense, graph_from_sparse)
def test_radius_neighbors_graph():
    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    # With radius 1.5 only the middle point reaches both others.
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
                                         include_self=True)
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 1.],
         [0., 1., 1.]])

    A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 0.],
         [1.01, 0., 1.40716026],
         [0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
    # Test radius_neighbors_graph to build the Nearest Neighbor graph
    # for sparse input.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)

    # NOTE(review): the loop variable is named n_neighbors but it is
    # passed as the *radius* positional argument of
    # radius_neighbors_graph — consider renaming.
    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            assert_array_almost_equal(
                neighbors.radius_neighbors_graph(X,
                                                 n_neighbors,
                                                 mode=mode).toarray(),
                neighbors.radius_neighbors_graph(Xcsr,
                                                 n_neighbors,
                                                 mode=mode).toarray())
def test_neighbors_badargs():
    # Test bad argument values: these should all raise ValueErrors
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')

    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)

    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        assert_raises(ValueError,
                      cls,
                      weights='blah')
        assert_raises(ValueError,
                      cls, p=-1)
        assert_raises(ValueError,
                      cls, algorithm='blah')

        # haversine is only defined for 2-d lat/lon data; predicting on
        # this 2-feature euclidean-looking X must fail, as must fitting
        # a ball_tree on sparse input.
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError,
                      nbrs.predict,
                      X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)

        nbrs = cls()
        assert_raises(ValueError,
                      nbrs.fit,
                      np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError,
                      nbrs.fit,
                      X[:, :, None], y)
        nbrs.fit(X, y)
        assert_raises(ValueError,
                      nbrs.predict,
                      [[]])

        # BUG FIX: the original used isinstance(cls, ...) where cls is a
        # *class*, which is always False, so the n_neighbors=-1 check
        # never ran. issubclass performs the intended test.
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
            nbrs = cls(n_neighbors=-1)
            assert_raises(ValueError, nbrs.fit, X, y)

    nbrs = neighbors.NearestNeighbors().fit(X)

    assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
    assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    # Test computing the neighbors for various metrics
    # create a symmetric matrix
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)  # symmetric PSD matrix for mahalanobis

    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)

    test = rng.rand(n_query_pts, n_features)

    for metric, metric_params in metrics:
        results = []
        # 'p' is passed as a top-level estimator param, not in metric_params.
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results.append(neigh.kneighbors(test, return_distance=True))
        # All supported algorithms must agree on distances and indices.
        assert_array_almost_equal(results[0][0], results[1][0])
        assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
    """A user-supplied callable metric must produce the same neighbor
    distances under 'auto' and 'brute' algorithm selection."""
    def custom_metric(x1, x2):
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))

    X = np.random.RandomState(42).rand(20, 2)
    nbrs_auto = neighbors.NearestNeighbors(3, algorithm='auto',
                                           metric=custom_metric)
    nbrs_brute = neighbors.NearestNeighbors(3, algorithm='brute',
                                            metric=custom_metric)
    nbrs_auto.fit(X)
    nbrs_brute.fit(X)

    dist_auto, ind_auto = nbrs_auto.kneighbors(X)
    dist_brute, ind_brute = nbrs_brute.kneighbors(X)
    assert_array_almost_equal(dist_auto, dist_brute)
def test_metric_params_interface():
    """Passing ``p`` via ``metric_params`` must emit a SyntaxWarning."""
    deprecated_params = {'p': 3}
    assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
                 metric_params=deprecated_params)
def test_predict_sparse_ball_kd_tree():
    # Tree-based algorithms (kd_tree, ball_tree) cannot operate on sparse
    # query input, so predict must raise ValueError.
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    y = rng.randint(0, 2, 5)
    nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
    nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
    for model in [nbrs1, nbrs2]:
        model.fit(X, y)
        assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    """Graph builders must honor non-euclidean metrics and reject an
    estimator fitted with a different metric than requested."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)

    # Find a reasonable radius.
    dist_array = pairwise_distances(X).flatten()
    # BUG FIX: np.sort returns a sorted *copy* and the original discarded
    # it, so the radius was taken from the unsorted array. Sort in place.
    dist_array.sort()
    radius = dist_array[15]

    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())

    # Test radiusneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric, mode='connectivity',
            include_self=True).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)

    # Raise error when wrong parameters are supplied.
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Assert each element of an object array equals the corresponding
    entry of a plain list of lists."""
    for position, element in enumerate(nparray):
        assert_array_equal(element, list_check[position])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et.al when query is not training data

    for algorithm in ALGORITHMS:

        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]

        # Test neighbors: [2] is distance 1 from [1]; [1] coincides with
        # the second training point (distance 0).
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])

        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et.al when query is None (i.e. the training set is
    # queried with each point excluded from its own neighborhood).
    for algorithm in ALGORITHMS:

        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)

        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            # BUG FIX: the loop body previously asserted on `rng` for both
            # iterations, so `kng` was never actually checked.
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])

        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    # Test behavior of kneighbors when duplicates are present in query

    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])

        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])

        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])

        # Mask the first duplicates when n_duplicates > n_neighbors.
        X = np.ones((3, 1))
        nn = neighbors.NearestNeighbors(n_neighbors=1)
        nn.fit(X)
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, np.zeros((3, 1)))
        assert_array_equal(ind, [[1], [0], [1]])

        # Test that zeros are explicitly marked in kneighbors_graph.
        kng = nn.kneighbors_graph(mode='distance')
        assert_array_equal(
            kng.A, np.zeros((3, 3)))
        assert_array_equal(kng.data, np.zeros(3))
        assert_array_equal(kng.indices, [1., 0., 1.])
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    # Test include_self parameter in neighbors_graph: when True each
    # sample counts as its own neighbor; when False it is excluded.
    X = [[2, 3], [4, 5]]
    kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
    kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
    assert_array_equal(kng, [[1., 0.], [0., 1.]])
    assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])

    rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
    rng_not_self = neighbors.radius_neighbors_graph(
        X, 5.0, include_self=False).A
    assert_array_equal(rng, [[1., 1.], [1., 1.]])
    assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
    # Results must be identical regardless of the n_jobs setting.
    X, y = datasets.make_classification(n_samples=10, n_features=2,
                                        n_redundant=0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                             algorithm=algorithm)
        clf.fit(X_train, y_train)
        # Reference results computed with the default (serial) setting.
        y_1 = clf.predict(X_test)
        dist_1, ind_1 = clf.kneighbors(X_test)
        A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
        for n_jobs in [-1, 2, 5]:
            clf.set_params(n_jobs=n_jobs)
            y = clf.predict(X_test)
            dist, ind = clf.kneighbors(X_test)
            A = clf.kneighbors_graph(X_test, mode='distance').toarray()
            assert_array_equal(y_1, y)
            assert_array_almost_equal(dist_1, dist)
            assert_array_equal(ind_1, ind)
            assert_array_almost_equal(A_1, A)
def test_dtype_convert():
    """String class labels must round-trip unchanged through fit/predict."""
    n_classes = 15
    features = np.eye(n_classes)
    labels = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])

    model = neighbors.KNeighborsClassifier(n_neighbors=1)
    predictions = model.fit(features, labels).predict(features)
    assert_array_equal(predictions, labels)
| bsd-3-clause |
amenonsen/ansible | lib/ansible/module_utils/network/nxos/config/vlans/vlans.py | 4 | 8762 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The nxos_vlans class
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to it's desired end-state is
created
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import dict_diff, to_list, remove_empties
from ansible.module_utils.network.nxos.facts.facts import Facts
from ansible.module_utils.network.nxos.utils.utils import get_interface_type, normalize_interface, search_obj_in_list
class Vlans(ConfigBase):
    """
    The nxos_vlans class

    Generates the NX-OS CLI commands needed to move the device's VLAN
    configuration from its current state (facts) to the requested state.
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vlans',
    ]

    # Keys that 'replaced'/'overridden' must not delete from a vlan when
    # they also appear in the desired config (they are re-set instead).
    exclude_params = ['name', 'state']

    def __init__(self, module):
        super(Vlans, self).__init__(module)

    def get_vlans_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
        vlans_facts = facts['ansible_network_resources'].get('vlans')
        if not vlans_facts:
            return []
        return vlans_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        commands = list()
        warnings = list()

        existing_vlans_facts = self.get_vlans_facts()
        commands.extend(self.set_config(existing_vlans_facts))
        if commands:
            # Only push commands to the device outside of check mode.
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands

        changed_vlans_facts = self.get_vlans_facts()

        result['before'] = existing_vlans_facts
        if result['changed']:
            result['after'] = changed_vlans_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vlans_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        config = self._module.params.get('config')
        want = []
        if config:
            for w in config:
                want.append(remove_empties(w))
        have = existing_vlans_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        if state in ('overridden', 'merged', 'replaced') and not want:
            self._module.fail_json(msg='config is required for state {0}'.format(state))

        commands = list()
        if state == 'overridden':
            commands.extend(self._state_overridden(want, have))
        elif state == 'deleted':
            commands.extend(self._state_deleted(want, have))
        else:
            # 'merged' and 'replaced' operate per desired vlan entry.
            for w in want:
                if state == 'merged':
                    commands.extend(self._state_merged(w, have))
                elif state == 'replaced':
                    commands.extend(self._state_replaced(w, have))
        return commands

    def _state_replaced(self, w, have):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        obj_in_have = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
        # Attributes present on the device but absent from (or different in)
        # the desired config are removed; then the merged set is applied.
        diff = dict_diff(w, obj_in_have)
        merged_commands = self.set_commands(w, have)
        if 'vlan_id' not in diff:
            diff['vlan_id'] = w['vlan_id']
        wkeys = w.keys()
        dkeys = diff.keys()
        for k in wkeys:
            # Do not generate 'no ...' for keys we are about to re-set.
            if k in self.exclude_params and k in dkeys:
                del diff[k]
        replaced_commands = self.del_attribs(diff)

        if merged_commands:
            # Drop removals that the merge would immediately re-issue.
            cmds = set(replaced_commands).intersection(set(merged_commands))
            for cmd in cmds:
                merged_commands.remove(cmd)
            commands.extend(replaced_commands)
            commands.extend(merged_commands)
        return commands

    def _state_overridden(self, want, have):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for h in have:
            obj_in_want = search_obj_in_list(h['vlan_id'], want, 'vlan_id')
            if h == obj_in_want:
                continue
            for w in want:
                if h['vlan_id'] == w['vlan_id']:
                    wkeys = w.keys()
                    hkeys = h.keys()
                    for k in wkeys:
                        if k in self.exclude_params and k in hkeys:
                            del h[k]
            # Remove every existing attribute not exactly matched by want.
            commands.extend(self.del_attribs(h))
        for w in want:
            commands.extend(self.set_commands(w, have))

        return commands

    def _state_merged(self, w, have):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        return self.set_commands(w, have)

    def _state_deleted(self, want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        if want:
            # Delete only the requested vlans.
            for w in want:
                obj_in_have = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                commands.append('no vlan ' + str(obj_in_have['vlan_id']))
        else:
            # No config supplied: delete every configured vlan.
            if not have:
                return commands
            for h in have:
                commands.append('no vlan ' + str(h['vlan_id']))
        return commands

    def del_attribs(self, obj):
        """Return 'no ...' commands removing each attribute in obj from
        its vlan. A dict holding only vlan_id produces no commands."""
        commands = []
        if not obj or len(obj.keys()) == 1:
            return commands
        commands.append('vlan ' + str(obj['vlan_id']))
        if 'name' in obj:
            commands.append('no' + ' ' + 'name')
        if 'state' in obj:
            commands.append('no state')
        if 'enabled' in obj:
            commands.append('no shutdown')
        if 'mode' in obj:
            # 'ce' is the device default mode, so resetting uses 'mode ce'.
            commands.append('mode ce')
        if 'mapped_vni' in obj:
            commands.append('no vn-segment')
        return commands

    def diff_of_dicts(self, w, obj):
        """Return the key/value pairs present in w but not in obj,
        always carrying vlan_id when anything differs."""
        diff = set(w.items()) - set(obj.items())
        diff = dict(diff)
        if diff and w['vlan_id'] == obj['vlan_id']:
            diff.update({'vlan_id': w['vlan_id']})
        return diff

    def add_commands(self, d):
        """Translate a (partial) vlan dict into the CLI commands that
        configure those attributes."""
        commands = []
        if not d:
            return commands
        commands.append('vlan' + ' ' + str(d['vlan_id']))
        if 'name' in d:
            commands.append('name ' + d['name'])
        if 'state' in d:
            commands.append('state ' + d['state'])
        if 'enabled' in d:
            # NOTE(review): compares against the *string* 'True', so a
            # boolean True would fall through to 'shutdown' — confirm the
            # facts layer always provides strings here.
            if d['enabled'] == 'True':
                commands.append('no shutdown')
            else:
                commands.append('shutdown')
        if 'mode' in d:
            commands.append('mode ' + d['mode'])
        if 'mapped_vni' in d:
            commands.append('vn-segment ' + d['mapped_vni'])
        return commands

    def set_commands(self, w, have):
        """Commands to create a missing vlan, or to update only the
        attributes of an existing vlan that differ."""
        commands = []
        obj_in_have = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
        if not obj_in_have:
            commands = self.add_commands(w)
        else:
            diff = self.diff_of_dicts(w, obj_in_have)
            commands = self.add_commands(diff)
        return commands
| gpl-3.0 |
tux-00/ansible | lib/ansible/module_utils/f5_utils.py | 9 | 10277 | #
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Legacy
try:
import bigsuds
bigsuds_found = True
except ImportError:
bigsuds_found = False
from ansible.module_utils.basic import env_fallback
def f5_argument_spec():
    """Return the argument spec shared by the legacy F5 modules.

    Each connection option falls back to a matching ``F5_*`` environment
    variable so credentials can be supplied outside the playbook.
    """
    return {
        'server': {
            'type': 'str',
            'required': True,
            'fallback': (env_fallback, ['F5_SERVER']),
        },
        'user': {
            'type': 'str',
            'required': True,
            'fallback': (env_fallback, ['F5_USER']),
        },
        'password': {
            'type': 'str',
            'aliases': ['pass', 'pwd'],
            'required': True,
            'no_log': True,
            'fallback': (env_fallback, ['F5_PASSWORD']),
        },
        'validate_certs': {
            'default': 'yes',
            'type': 'bool',
            'fallback': (env_fallback, ['F5_VALIDATE_CERTS']),
        },
        'server_port': {
            'type': 'int',
            'default': 443,
            'fallback': (env_fallback, ['F5_SERVER_PORT']),
        },
        'state': {
            'type': 'str',
            'default': 'present',
            'choices': ['present', 'absent'],
        },
        'partition': {
            'type': 'str',
            'default': 'Common',
            'fallback': (env_fallback, ['F5_PARTITION']),
        },
    }
def f5_parse_arguments(module):
    """Validate legacy F5 module params and return them as a tuple.

    :param module: AnsibleModule whose params follow ``f5_argument_spec``.
    :returns: (server, user, password, state, partition, validate_certs,
               server_port)
    :raises: fails the module if bigsuds is missing or certificate
             verification is requested on a Python without ssl.SSLContext.
    """
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")

    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            # FIX: the original message concatenated two literals without a
            # separating space ("...2.7.9.Either update...") and carried a
            # stray trailing apostrophe.
            module.fail_json(
                msg="bigsuds does not support verifying certificates with "
                    "python < 2.7.9. Either update python or set "
                    "validate_certs=False on the task")

    return (
        module.params['server'],
        module.params['user'],
        module.params['password'],
        module.params['state'],
        module.params['partition'],
        module.params['validate_certs'],
        module.params['server_port']
    )
def bigip_api(bigip, user, password, validate_certs, port=443):
    """Build a bigsuds BIGIP API connection, adapting to the capabilities
    of the installed bigsuds version.

    Bug fix: the previous code compared version *strings* lexicographically,
    so e.g. ``'1.0.10' >= '1.0.4'`` evaluated to False.  Versions are now
    compared as numeric tuples.

    :param bigip: hostname of the BIG-IP device
    :param user: API username
    :param password: API password
    :param validate_certs: whether to verify TLS certificates
    :param port: management port (only honored by bigsuds >= 1.0.4)
    :return: a connected ``bigsuds.BIGIP`` instance
    """
    def _version_tuple(version):
        # Best-effort numeric tuple; non-numeric components count as 0.
        parts = []
        for part in version.split('.'):
            try:
                parts.append(int(part))
            except ValueError:
                parts.append(0)
        return tuple(parts)

    try:
        version = _version_tuple(bigsuds.__version__)
        if version >= (1, 0, 4):
            # Both 'verify' and 'port' parameters are supported.
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
        elif version == (1, 0, 3):
            # 'verify' is supported, 'port' is not.
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
        else:
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
    except TypeError:
        # bigsuds < 1.0.3, no verify param
        if validate_certs:
            # Note: verified we have SSLContext when we parsed params
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
        else:
            import ssl
            if hasattr(ssl, 'SSLContext'):
                # Really, you should never do this. It disables certificate
                # verification *globally*. But since older bigip libraries
                # don't give us a way to toggle verification we need to
                # disable it at the global level.
                # From https://www.python.org/dev/peps/pep-0476/#id29
                ssl._create_default_https_context = ssl._create_unverified_context
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
    return api
# Fully Qualified name (with the partition)
def fq_name(partition, name):
    """Return *name* prefixed with ``/partition/`` unless it is None or
    already fully qualified (starts with '/')."""
    if name is None or name.startswith('/'):
        return name
    return '/%s/%s' % (partition, name)
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
    """Fully qualify every name in *list_names* with *partition*.

    Returns None when *list_names* is None.  Uses a list comprehension so a
    real list is returned on both Python 2 and Python 3 (``map`` is lazy on
    Python 3, which would hand callers a one-shot iterator).
    """
    if list_names is None:
        return None
    return [fq_name(partition, name) for name in list_names]
# New style
from abc import ABCMeta, abstractproperty
from collections import defaultdict
try:
from f5.bigip import ManagementRoot as BigIpMgmt
from f5.bigip.contexts import TransactionContextManager as BigIpTxContext
from f5.bigiq import ManagementRoot as BigIqMgmt
from f5.iworkflow import ManagementRoot as iWorkflowMgmt
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
from ansible.module_utils.basic import *
from ansible.module_utils.six import iteritems, with_metaclass
# Common argument spec for the "new style" (f5-sdk based) modules.  It
# mirrors the legacy f5_argument_spec() above; every option also falls back
# to the corresponding F5_* environment variable.
F5_COMMON_ARGS = dict(
    server=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_SERVER'])
    ),
    user=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_USER'])
    ),
    password=dict(
        type='str',
        aliases=['pass', 'pwd'],
        required=True,
        no_log=True,
        fallback=(env_fallback, ['F5_PASSWORD'])
    ),
    validate_certs=dict(
        default='yes',
        type='bool',
        fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
    ),
    server_port=dict(
        type='int',
        default=443,
        fallback=(env_fallback, ['F5_SERVER_PORT'])
    ),
    state=dict(
        type='str',
        default='present',
        choices=['present', 'absent']
    ),
    partition=dict(
        type='str',
        default='Common',
        fallback=(env_fallback, ['F5_PARTITION'])
    )
)
class AnsibleF5Client(object):
    """Couples an AnsibleModule with an f5-sdk management root.

    Merges the module-specific ``argument_spec`` with ``F5_COMMON_ARGS``,
    instantiates the AnsibleModule, and opens a REST connection to the
    requested product ('bigip', 'iworkflow' or 'bigiq').

    Fix: the internal ``_get_mgmt_root`` parameter was named ``type``,
    shadowing the builtin; it is renamed (called positionally, so callers
    are unaffected).
    """

    def __init__(self, argument_spec=None, supports_check_mode=False,
                 mutually_exclusive=None, required_together=None,
                 required_if=None, required_one_of=None,
                 f5_product_name='bigip'):
        # Module-specific options override/extend the common F5 options.
        merged_arg_spec = dict()
        merged_arg_spec.update(F5_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec
        mutually_exclusive_params = []
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive
        required_together_params = []
        if required_together:
            required_together_params += required_together
        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of
        )
        self.check_mode = self.module.check_mode
        self._connect_params = self._get_connect_params()
        try:
            self.api = self._get_mgmt_root(
                f5_product_name, **self._connect_params
            )
        except iControlUnexpectedHTTPError as exc:
            # Surface connection/authentication failures as module failures.
            self.fail(str(exc))

    def fail(self, msg):
        """Abort the module run with an error message."""
        self.module.fail_json(msg=msg)

    def _get_connect_params(self):
        """Extract the connection-related module parameters as a dict."""
        params = dict(
            user=self.module.params['user'],
            password=self.module.params['password'],
            server=self.module.params['server'],
            server_port=self.module.params['server_port'],
            validate_certs=self.module.params['validate_certs']
        )
        return params

    def _get_mgmt_root(self, product, **kwargs):
        """Build the f5-sdk management root for the given product name.

        Returns None for an unrecognized product name (pre-existing
        behavior, kept for compatibility).
        """
        if product == 'bigip':
            return BigIpMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                token='tmos'
            )
        elif product == 'iworkflow':
            return iWorkflowMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                token='local'
            )
        elif product == 'bigiq':
            return BigIqMgmt(
                kwargs['server'],
                kwargs['user'],
                kwargs['password'],
                port=kwargs['server_port'],
                token='local'
            )
class AnsibleF5Parameters(object):
    """Container for module parameters with optional API-name mapping.

    Subclasses may define an ``api_map`` dict translating Ansible parameter
    names to API attribute names; thanks to ``__getattr__``, a subclass
    without ``api_map`` resolves it to None.  Values route through matching
    ``@property`` setters when one exists, otherwise they are stashed in
    ``self._values``.

    Fix: removed the ``dict_to_use`` locals, which were assigned but never
    used.
    """

    def __init__(self, params=None):
        # Unknown keys read back as None instead of raising.
        self._values = defaultdict(lambda: None)
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k
                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def __getattr__(self, item):
        # Ensures that properties that weren't defined, and therefore stashed
        # in the `_values` dict, will be retrievable.
        return self._values[item]

    @property
    def partition(self):
        """Partition name with surrounding slashes stripped; 'Common' by default."""
        if self._values['partition'] is None:
            return 'Common'
        return self._values['partition'].strip('/')

    @partition.setter
    def partition(self, value):
        self._values['partition'] = value

    def _filter_params(self, params):
        """Return a copy of *params* without None-valued entries."""
        return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
    """Base exception for errors raised by the F5 Ansible modules."""
    pass
| gpl-3.0 |
karyon/django | tests/forms_tests/tests/tests.py | 16 | 16737 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import (
CharField, FileField, Form, ModelChoiceField, ModelForm,
)
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from django.utils import six
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, Group, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
    """ModelForm exposing all fields of ChoiceFieldModel."""
    class Meta:
        model = ChoiceFieldModel
        fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
    """ModelForm exposing all fields of OptionalMultiChoiceModel."""
    class Meta:
        model = OptionalMultiChoiceModel
        fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
    """Form overriding the excluded 'multi_choice' model field with a plain
    CharField (exercises save()'s handling of excluded m2m fields)."""
    multi_choice = CharField(max_length=50)
    class Meta:
        exclude = ['multi_choice']
        model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
    """Form for a char choice field that uses a blank-label choice."""
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
    """Form for an integer choice field that uses a blank-label choice."""
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
    """Form for a nullable char choice field with a blank-label choice."""
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice_string_w_none']
class FileForm(Form):
    """Plain form with a single required file-upload field."""
    file1 = FileField()
class TestModelChoiceField(TestCase):
    """Query behavior of ModelChoiceField."""
    def test_choices_not_fetched_when_not_rendering(self):
        """
        Generating choices for ModelChoiceField should require 1 query (#12510).
        """
        self.groups = [Group.objects.create(name=name) for name in 'abc']
        # only one query is required to pull the model from DB
        with self.assertNumQueries(1):
            field = ModelChoiceField(Group.objects.order_by('-name'))
            self.assertEqual('a', field.clean(self.groups[0].pk).name)
    def test_queryset_manager(self):
        # A Manager (not only a QuerySet) may be passed as the field's queryset.
        f = ModelChoiceField(ChoiceOptionModel.objects)
        choice = ChoiceOptionModel.objects.create(name="choice 1")
        self.assertEqual(list(f.choices), [('', '---------'), (choice.pk, str(choice))])
class TestTicket14567(TestCase):
    """
    Check that the return values of ModelMultipleChoiceFields are QuerySets
    """
    def test_empty_queryset_return(self):
        "If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
        option = ChoiceOptionModel.objects.create(name='default')
        form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
        self.assertTrue(form.is_valid())
        # Check that the empty value is a QuerySet
        self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
        # While we're at it, test whether a QuerySet is returned if there *is* a value.
        self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
    """Rendering of model-field defaults and initial values in ModelForms."""
    def test_no_empty_option(self):
        "If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
        option = ChoiceOptionModel.objects.create(name='default')
        choices = list(ChoiceFieldForm().fields['choice'].choices)
        self.assertEqual(len(choices), 1)
        self.assertEqual(choices[0], (option.pk, six.text_type(option)))
    def test_callable_initial_value(self):
        "The initial value for a callable default returning a queryset is the pk (refs #13769)"
        ChoiceOptionModel.objects.create(id=1, name='default')
        ChoiceOptionModel.objects.create(id=2, name='option 2')
        ChoiceOptionModel.objects.create(id=3, name='option 3')
        self.assertHTMLEqual(
            ChoiceFieldForm().as_p(),
            """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /></p>"""
        )
    def test_initial_instance_value(self):
        "Initial instances for model fields may also be instances (refs #7287)"
        ChoiceOptionModel.objects.create(id=1, name='default')
        obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        self.assertHTMLEqual(
            ChoiceFieldForm(initial={
                'choice': obj2,
                'choice_int': obj2,
                'multi_choice': [obj2, obj3],
                'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
            }).as_p(),
            """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /></p>"""
        )
class FormsModelTestCase(TestCase):
    """Assorted ModelForm behaviors: unicode uploads, field boundaries,
    and formfield initial values derived from model defaults."""
    def test_unicode_filename(self):
        # FileModel with unicode filename and data #########################
        file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))
        f = FileForm(data={}, files={'file1': file1}, auto_id=False)
        self.assertTrue(f.is_valid())
        self.assertIn('file1', f.cleaned_data)
        m = FileModel.objects.create(file=f.cleaned_data['file1'])
        self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
        m.delete()
    def test_boundary_conditions(self):
        # Boundary conditions on a PositiveIntegerField #########################
        class BoundaryForm(ModelForm):
            class Meta:
                model = BoundaryModel
                fields = '__all__'
        f = BoundaryForm({'positive_integer': 100})
        self.assertTrue(f.is_valid())
        f = BoundaryForm({'positive_integer': 0})
        self.assertTrue(f.is_valid())
        f = BoundaryForm({'positive_integer': -100})
        self.assertFalse(f.is_valid())
    def test_formfield_initial(self):
        # Formfield initial values ########
        # If the model has default values for some fields, they are used as the formfield
        # initial values.
        class DefaultsForm(ModelForm):
            class Meta:
                model = Defaults
                fields = '__all__'
        self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
        self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
        self.assertEqual(DefaultsForm().fields['value'].initial, 42)
        # A callable default is re-evaluated per render, so two widgets differ.
        r1 = DefaultsForm()['callable_default'].as_widget()
        r2 = DefaultsForm()['callable_default'].as_widget()
        self.assertNotEqual(r1, r2)
        # In a ModelForm that is passed an instance, the initial values come from the
        # instance's values, not the model's defaults.
        foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
        instance_form = DefaultsForm(instance=foo_instance)
        self.assertEqual(instance_form.initial['name'], 'instance value')
        self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
        self.assertEqual(instance_form.initial['value'], 12)
        from django.forms import CharField
        class ExcludingForm(ModelForm):
            name = CharField(max_length=255)
            class Meta:
                model = Defaults
                exclude = ['name', 'callable_default']
        f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Hello')
        # 'name' is excluded from the model form, so the model default wins.
        obj = f.save()
        self.assertEqual(obj.name, 'class default value')
        self.assertEqual(obj.value, 99)
        self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
    """ModelForm creation when a ForeignKey target is declared later (#10405)."""
    def test_invalid_loading_order(self):
        """
        Test for issue 10405
        """
        class A(models.Model):
            ref = models.ForeignKey("B", models.CASCADE)
        class Meta:
            model = A
            fields = '__all__'
        # "B" is not defined yet, so building the form must fail.
        with self.assertRaises(ValueError):
            ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta})
        class B(models.Model):
            pass
    def test_valid_loading_order(self):
        """
        Test for issue 10405
        """
        class C(models.Model):
            ref = models.ForeignKey("D", models.CASCADE)
        class D(models.Model):
            pass
        class Meta:
            model = C
            fields = '__all__'
        # Both models exist before the form is built, so this succeeds.
        self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
    """save_instance must honor the exclude list for m2m fields (#12337)."""
    def test_m2m_field_exclusion(self):
        # Issue 12337. save_instance should honor the passed-in exclude keyword.
        opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
        opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        initial = {
            'choice': opt1,
            'choice_int': opt1,
        }
        data = {
            'choice': opt2.pk,
            'choice_int': opt2.pk,
            'multi_choice': 'string data!',
            'multi_choice_int': [opt1.pk],
        }
        instance = ChoiceFieldModel.objects.create(**initial)
        instance.multi_choice.set([opt2, opt3])
        instance.multi_choice_int.set([opt2, opt3])
        form = ChoiceFieldExclusionForm(data=data, instance=instance)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
        form.save()
        self.assertEqual(form.instance.choice.pk, data['choice'])
        self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
        # The excluded m2m field must keep its previous relations.
        self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
        self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
    """Rendering and saving of blank-label choices on char and integer fields."""
    def test_empty_field_char(self):
        f = EmptyCharLabelChoiceForm()
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
        )
    def test_empty_field_char_none(self):
        f = EmptyCharLabelNoneChoiceForm()
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
        )
    def test_save_empty_label_forms(self):
        # Test that saving a form with a blank choice results in the expected
        # value being stored in the database.
        tests = [
            (EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
            (EmptyIntegerLabelChoiceForm, 'choice_integer', None),
            (EmptyCharLabelChoiceForm, 'choice', ''),
        ]
        for form, key, expected in tests:
            f = form({'name': 'some-key', key: ''})
            self.assertTrue(f.is_valid())
            m = f.save()
            self.assertEqual(expected, getattr(m, key))
            self.assertEqual('No Preference',
                             getattr(m, 'get_{}_display'.format(key))())
    def test_empty_field_integer(self):
        f = EmptyIntegerLabelChoiceForm()
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
        )
    def test_get_display_value_on_none(self):
        m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
        self.assertIsNone(m.choice_integer)
        self.assertEqual('No Preference', m.get_choice_integer_display())
    def test_html_rendering_of_prepopulated_models(self):
        none_model = ChoiceModel(name='none-test', choice_integer=None)
        f = EmptyIntegerLabelChoiceForm(instance=none_model)
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="none-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
        )
        foo_model = ChoiceModel(name='foo-test', choice_integer=1)
        f = EmptyIntegerLabelChoiceForm(instance=foo_model)
        self.assertHTMLEqual(
            f.as_p(),
            """<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="foo-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected="selected">Foo</option>
<option value="2">Bar</option>
</select></p>"""
        )
| bsd-3-clause |
alqfahad/odoo | addons/mrp_byproduct/__openerp__.py | 259 | 1819 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP Byproducts',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module allows you to produce several products from one production order.
=============================================================================
You can configure by-products in the bill of material.
Without this module:
--------------------
A + B + C -> D
With this module:
-----------------
A + B + C -> D + E
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['base', 'mrp'],
'data': [
'security/ir.model.access.csv',
'mrp_byproduct_view.xml'
],
'demo': [],
'test': ['test/mrp_byproduct.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DistrictDataLabs/django-data-product | irisfinder/views.py | 1 | 1948 | from django.shortcuts import render
import datetime
from models import Iris, SVMModels
from forms import UserIrisData
import sklearn
from sklearn import svm
from sklearn.cross_validation import train_test_split
import numpy as np
from django.conf import settings
import cPickle
import scipy
from pytz import timezone
import random
# Create your views here.
def predict(request):
    """Render the iris prediction page.

    GET               -> show an empty measurement form.
    POST + 'submit'   -> store the user's measurements, run the most recent
                         pickled SVM model, and show the prediction for
                         user verification.
    POST + 'verified' -> store the species the user confirmed.

    Bug fix: POST values are strings; they are now cast to float before
    being fed to the SVM, which requires numeric features.
    """
    data = {
        "app_name": "irisfinder",
        "random_number": random.randint(0, 10000)
    }
    if request.method == "GET":
        form = UserIrisData()
        data.update({"form": form, "submit": True})
    elif request.method == "POST":
        form = UserIrisData(request.POST)
        sepal_length = request.POST.get("sepal_length")
        sepal_width = request.POST.get("sepal_width")
        petal_length = request.POST.get("petal_length")
        petal_width = request.POST.get("petal_width")
        if request.POST.get('submit'):
            user_data = Iris(user_data=True,
                             sepal_length=sepal_length,
                             sepal_width=sepal_width,
                             petal_length=petal_length,
                             petal_width=petal_width)
            user_data.save()
            # Use the most recently trained model.
            model_object = SVMModels.objects.order_by("-run_date").first()
            model = cPickle.loads(model_object.model_pickle)
            features = [float(sepal_length), float(sepal_width),
                        float(petal_length), float(petal_width)]
            prediction = model.predict(features)
            item_pk = user_data.pk
            species = prediction[0]
            data.update({"form": form, "verify": True, "item_pk": item_pk,
                         "species": species, "prediction": prediction[0]})
        elif request.POST.get('verified'):
            # Persist the species label the user confirmed or corrected.
            user_data = Iris.objects.get(pk=int(request.POST.get("item_pk")))
            user_data.species = request.POST.get("species")
            user_data.save()
    return render(request, "predict.html", context=data)
albertrdixon/CouchPotatoServer | libs/CodernityDB/rr_cache.py | 82 | 3673 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from random import choice
def cache1lvl(maxsize=100):
    """Single-level random-replacement memoization decorator.

    The first positional argument of the wrapped function is the cache key.
    When the cache holds ``maxsize`` entries, roughly 10% (at least one)
    randomly chosen entries are evicted.  The wrapper exposes ``cache``,
    ``clear()`` and ``delete(key)``.

    Fixes: Python 3 compatibility (``xrange`` and indexing ``dict.keys()``
    were Python-2-only) and renames the inner dict, which shadowed this
    factory's own name.  Behavior on Python 2 is unchanged.
    """
    def decorating_function(user_function):
        cache = {}

        @functools.wraps(user_function)
        def wrapper(key, *args, **kwargs):
            try:
                result = cache[key]
            except KeyError:
                if len(cache) == maxsize:
                    # Evict ~10% (at least one) random entries.
                    for _ in range(maxsize // 10 or 1):
                        del cache[choice(list(cache.keys()))]
                cache[key] = user_function(key, *args, **kwargs)
                result = cache[key]
            return result

        def clear():
            """Drop every cached entry."""
            cache.clear()

        def delete(key):
            """Remove one entry; return True if it existed."""
            try:
                del cache[key]
                return True
            except KeyError:
                return False

        wrapper.clear = clear
        wrapper.cache = cache
        wrapper.delete = delete
        return wrapper
    return decorating_function
def cache2lvl(maxsize=100):
    """Two-level (outer key -> inner key -> value) random-replacement cache.

    Results of ``fn(a, b, ...)`` are stored under ``cache[a][b]``.  When the
    total entry count reaches ``maxsize``, roughly 10% (at least one)
    randomly chosen (outer, inner) pairs are evicted.  The wrapper exposes
    ``cache``, ``cache_size``, ``clear()`` and ``delete(key, inner_key=None)``.

    Fixes: Python 3 compatibility — ``xrange`` and ``choice(dict.keys())``
    were Python-2-only.  Behavior on Python 2 is unchanged.
    """
    def decorating_function(user_function):
        cache = {}

        @functools.wraps(user_function)
        def wrapper(*args, **kwargs):
            try:
                result = cache[args[0]][args[1]]
            except KeyError:
                if wrapper.cache_size == maxsize:
                    to_delete = maxsize // 10 or 1
                    # Evict random (outer, inner) pairs; list() makes the
                    # key views indexable on Python 3 as well.
                    for _ in range(to_delete):
                        key1 = choice(list(cache.keys()))
                        key2 = choice(list(cache[key1].keys()))
                        del cache[key1][key2]
                        if not cache[key1]:
                            del cache[key1]
                    wrapper.cache_size -= to_delete
                result = user_function(*args, **kwargs)
                try:
                    cache[args[0]][args[1]] = result
                except KeyError:
                    cache[args[0]] = {args[1]: result}
                wrapper.cache_size += 1
            return result

        def clear():
            """Drop every cached entry and reset the size counter."""
            cache.clear()
            wrapper.cache_size = 0

        def delete(key, inner_key=None):
            """Remove one (key, inner_key) entry, or the whole outer key.

            Returns True if something was removed, False otherwise.
            """
            if inner_key:
                try:
                    del cache[key][inner_key]
                    if not cache[key]:
                        del cache[key]
                    wrapper.cache_size -= 1
                    return True
                except KeyError:
                    return False
            else:
                try:
                    wrapper.cache_size -= len(cache[key])
                    del cache[key]
                    return True
                except KeyError:
                    return False

        wrapper.clear = clear
        wrapper.cache = cache
        wrapper.delete = delete
        wrapper.cache_size = 0
        return wrapper
    return decorating_function
| gpl-3.0 |
linux-shield/kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
# Optional single argument: either a numeric pid or a command name to
# restrict the report to.
for_comm = None
for_pid = None
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]
# comm -> pid -> syscall id -> count (autovivifying dict from perf's Core).
syscalls = autodict()
def trace_begin():
    # Called once by perf before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after all events were processed.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, args):
        # Count one syscall entry per (comm, pid, syscall id), honoring the
        # optional comm/pid filter from the command line.
        if (for_comm and common_comm != for_comm) or \
           (for_pid and common_pid != for_pid ):
            return
        try:
            syscalls[common_comm][common_pid][id] += 1
        except TypeError:
            # autodict leaf did not exist yet: += on the autovivified node
            # raises TypeError, so initialize the counter here.
            syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
        # Forward the tracepoint-flavored handler to the raw handler.
        # NOTE(review): this signature has no common_callchain, yet the raw
        # handler requires it — **locals() looks like it would raise a
        # TypeError if this entry point fires; confirm against perf's
        # script bindings.
        raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
    # Print per-comm/per-pid syscall counts, most frequent first.
    # Python-2-only syntax throughout (print statements with trailing
    # commas, tuple-parameter lambda, dict.iteritems).
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by descending count (v), then by syscall id.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                          key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
tarsqi/ttk | docmodel/docstructure_parser.py | 1 | 3957 | """Document Structure Parser.
This module contains a minimal document structure parser. It is meant as a
temporary default and will be replaced by more sophisticated parsers and these
parsers will act more like the other tarsqi components.
The main goal of the parser is to add docelement tags to the tag repository on
the TarsqiDocument. Sometimes docelement tags already exist in the tag
repository (for example when reading a ttk file), in which case the parser does
nothing. Otherwise, the parser calls a simple method to recognize paragraphs and
creates a docelement Tag for each of them.
The docelements are used by Tarsqi components by looping over them and
processing the elements one by one.
"""
from __future__ import absolute_import
from library.tarsqi_constants import DOCSTRUCTURE
class DocumentStructureParser(object):
    """Minimal default document structure parser.

    Adds one 'docelement' tag per paragraph to the TarsqiDocument's tag
    repository, unless docelement tags are already present (for example
    when a ttk file was read).
    """

    def parse(self, tarsqidoc):
        """Add paragraph docelement tags to tarsqidoc if none exist yet.

        Paragraphs are separated by white lines in the source text.
        """
        if tarsqidoc.tags.find_tags('docelement'):
            return
        boundaries = split_paragraphs(tarsqidoc.sourcedoc.text)
        for number, (start, end) in enumerate(boundaries, start=1):
            features = {'id': "d%s" % number,
                        'type': 'paragraph',
                        'origin': DOCSTRUCTURE}
            tarsqidoc.tags.add_tag('docelement', start, end, features)
def split_paragraphs(text):
    """Very simplistic way to split a paragraph into more than one paragraph,
    simply by looking for an empty line.

    Returns a list of (begin, end) character offsets, one pair per
    paragraph.  The scan alternates between consuming a token and consuming
    whitespace; a whitespace gap containing more than one newline closes the
    current paragraph."""
    text_end = len(text)
    (par_begin, par_end) = (None, None)
    # Skip leading whitespace; the first paragraph starts after it.
    (p1, p2, space) = slurp_space(text, 0)
    par_begin = p2
    # Alternates: False -> consume a token next, True -> consume space next.
    seeking_space = False
    paragraphs = []
    last_para = (None, None)
    while p2 < text_end:
        if not seeking_space:
            (p1, p2, token) = slurp_token(text, p2)
            par_end = p2
            seeking_space = True
        else:
            (p1, p2, space) = slurp_space(text, p2)
            seeking_space = False
            if space.count("\n") > 1:
                # A blank line: close the current paragraph at the start of
                # the gap and open the next one after it.
                par_end = p1
                last_para = (par_begin, par_end)
                paragraphs.append((par_begin, par_end))
                par_begin = p2
                par_end = None
        # print('TOK', p1, p2, par_begin, par_end, seeking_space, space)
    # Flush a paragraph that ended exactly at end-of-text (no trailing space).
    if seeking_space and p2 > par_begin:
        last_para = (par_begin, par_end)
        paragraphs.append((par_begin, par_end))
    # deal with the boundary case where there are no empty lines at the end
    if not last_para == (par_begin, par_end) and par_end is not None:
        paragraphs.append((par_begin, par_end))
    return paragraphs
def slurp(text, offset, test):
    """Consume consecutive characters of *text* starting at *offset* for
    which test(char) is true.  Return (begin, end, consumed_substring)."""
    start = offset
    pos = offset
    limit = len(text)
    while pos < limit and test(text[pos]):
        pos += 1
    return start, pos, text[start:pos]
def slurp_space(text, offset):
    """Consume a run of whitespace starting at *offset*; return the begin
    and end positions and the consumed string."""
    return slurp(text, offset, lambda char: char.isspace())
def slurp_token(text, offset):
    """Consume a run of non-whitespace characters starting at *offset*;
    return the begin and end positions and the consumed string."""
    return slurp(text, offset, lambda char: not char.isspace())
| apache-2.0 |
KaiRo-at/socorro | socorro/unittest/external/es/base.py | 2 | 29371 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
import random
import uuid
from distutils.version import LooseVersion
from functools import wraps
from elasticsearch.helpers import bulk
from configman import ConfigurationManager, environment
from nose import SkipTest
from socorro.external.es.base import ElasticsearchConfig
from socorro.external.es.index_creator import IndexCreator
from socorro.external.es.supersearch import SuperSearch
from socorro.external.es.super_search_fields import SuperSearchFields
from socorro.unittest.testbase import TestCase
# Configman value overrides shared by every test configuration built in this
# module (see ElasticsearchTestCase.get_tuned_config).  They point all tests
# at dedicated integration-test indices so real data is never touched.
DEFAULT_VALUES = {
    'elasticsearch.elasticsearch_class': (
        'socorro.external.es.connection_context.ConnectionContext'
    ),
    'resource.elasticsearch.elasticsearch_default_index': (
        'socorro_integration_test'
    ),
    'resource.elasticsearch.elasticsearch_index': (
        'socorro_integration_test_reports'
    ),
    'resource.elasticsearch.elasticsearch_timeout': 10,
}

# Extra overrides for cron-job style tests: a single 1-second retry backoff
# keeps retry loops fast in the test environment.
CRON_JOB_EXTA_VALUES = {
    'resource.elasticsearch.backoff_delays': [1],
}
SUPERSEARCH_FIELDS = {
'signature': {
'name': 'signature',
'in_database_name': 'signature',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'signature': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'product': {
'name': 'product',
'in_database_name': 'product',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'product': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'version': {
'name': 'version',
'in_database_name': 'version',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'platform': {
'name': 'platform',
'in_database_name': 'os_name',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'os_name': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'release_channel': {
'name': 'release_channel',
'in_database_name': 'release_channel',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'date': {
'name': 'date',
'in_database_name': 'date_processed',
'data_validation_type': 'datetime',
'query_type': 'date',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'date',
'format': (
'yyyy-MM-dd\'T\'HH:mm:ssZZ||yyyy-MM-dd\'T\'HH:mm:ss.SSSSSSZZ'
)
},
},
'address': {
'name': 'address',
'in_database_name': 'address',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'build_id': {
'name': 'build_id',
'in_database_name': 'build',
'data_validation_type': 'int',
'query_type': 'number',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'long'
},
},
'reason': {
'name': 'reason',
'in_database_name': 'reason',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'email': {
'name': 'email',
'in_database_name': 'email',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'url': {
'name': 'url',
'in_database_name': 'url',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'uuid': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'uuid',
'is_exposed': False,
'is_mandatory': False,
'is_returned': True,
'name': 'uuid',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'index': 'not_analyzed',
'type': 'string'
}
},
'process_type': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'any', 'browser', 'plugin', 'content', 'all'
],
'has_full_version': False,
'in_database_name': 'process_type',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'process_type',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'user_comments': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'user_comments',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'user_comments',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'full': {
'index': 'not_analyzed',
'type': 'string'
},
'user_comments': {
'type': 'string'
}
},
'type': 'multi_field'
}
},
'accessibility': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'Accessibility',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'accessibility',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'type': 'boolean'
}
},
'b2g_os_version': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'B2G_OS_Version',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'b2g_os_version',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'bios_manufacturer': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'BIOS_Manufacturer',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'bios_manufacturer',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'vendor': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'Vendor',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'vendor',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'useragent_locale': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'useragent_locale',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'useragent_locale',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'is_garbage_collecting': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'IsGarbageCollecting',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'is_garbage_collecting',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'type': 'boolean'
}
},
'available_virtual_memory': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'AvailableVirtualMemory',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'available_virtual_memory',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'install_age': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'install_age',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'install_age',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'plugin_filename': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginFilename',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_filename',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginFilename': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'plugin_name': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginName',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_name',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginName': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'plugin_version': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginVersion',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_version',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginVersion': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'android_model': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'Android_Model',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'android_model',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'Android_Model': {
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'dump': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'dump',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'dump',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'index': 'not_analyzed',
'type': 'string'
}
},
'cpu_info': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'cpu_info',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'cpu_info',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'cpu_info': {
'analyzer': 'standard',
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'dom_ipc_enabled': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'DOMIPCEnabled',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'dom_ipc_enabled',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'null_value': False,
'type': 'boolean'
}
},
'app_notes': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'app_notes',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'app_notes',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'hang_type': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'any', 'crash', 'hang', 'all'
],
'has_full_version': False,
'in_database_name': 'hang_type',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'hang_type',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'short'
}
},
'exploitability': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'high', 'normal', 'low', 'none', 'unknown', 'error'
],
'has_full_version': False,
'in_database_name': 'exploitability',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'exploitability',
'namespace': 'processed_crash',
'permissions_needed': [
'crashstats.view_exploitability'
],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'platform_version': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'os_version',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'platform_version',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'type': 'string'
}
},
'write_combine_size': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'write_combine_size',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'write_combine_size',
'namespace': 'processed_crash.json_dump',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'app_init_dlls': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'app_init_dlls',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'app_init_dlls',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'type': 'string',
'analyzer': 'semicolon_keywords',
}
},
# Add a synonym field.
'product_2': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'product',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'product_2',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
},
# Add a fake field.
'fake_field': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'fake_field',
'is_exposed': True,
'is_mandatory': False,
'is_returned': False,
'name': 'fake_field',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
},
}
def minimum_es_version(minimum_version):
    """Skip the test if the Elasticsearch version is less than specified.

    :arg minimum_version: string; the minimum Elasticsearch version required
    """
    def decorated(test):
        """Wrap *test* so it only runs against a new-enough Elasticsearch."""
        @wraps(test)
        def test_with_version(self):
            "Only run the test if ES version is not less than specified."
            running = self.connection.info()['version']['number']
            # Guard clause: bail out early when the server is too old.
            if LooseVersion(running) < LooseVersion(minimum_version):
                raise SkipTest
            test(self)
        return test_with_version
    return decorated
class SuperSearchWithFields(SuperSearch):
    """SuperSearch's get method requires to be passed the list of all fields.
    This class does that automatically so we can just use `get()`. """

    def get(self, **kwargs):
        """Run a supersearch query, injecting the full field list.

        Fetches all supersearch fields from Elasticsearch and passes them
        as the ``_fields`` keyword before delegating to ``SuperSearch.get``.
        Note: any ``_fields`` supplied by the caller is overwritten.
        """
        kwargs['_fields'] = SuperSearchFields(config=self.config).get_fields()
        return super(SuperSearchWithFields, self).get(**kwargs)
class ElasticsearchTestCase(TestCase):
    """Base class for Elastic Search related unit tests.

    Builds a configman configuration pointing at the integration-test
    indices (see DEFAULT_VALUES), opens an Elasticsearch connection, and
    (re)creates the supersearch fields and crash index around each test.
    """

    def __init__(self, *args, **kwargs):
        """Create the config, index creator/client and ES connection."""
        super(ElasticsearchTestCase, self).__init__(*args, **kwargs)
        self.config = self.get_base_config()
        es_context = self.config.elasticsearch.elasticsearch_class(
            config=self.config.elasticsearch
        )
        creator_config = self.get_tuned_config(IndexCreator)
        self.index_creator = IndexCreator(creator_config)
        self.index_client = self.index_creator.get_index_client()
        with es_context() as conn:
            self.connection = conn

    def setUp(self):
        """Index the supersearch fields and create the crash index."""
        # Create the supersearch fields.
        self.index_super_search_fields()
        self.index_creator.create_socorro_index(
            self.config.elasticsearch.elasticsearch_index
        )
        super(ElasticsearchTestCase, self).setUp()

    def tearDown(self):
        """Delete both test indices created for this test."""
        # Clear the test indices.
        self.index_client.delete(
            self.config.elasticsearch.elasticsearch_default_index
        )
        self.index_client.delete(
            self.config.elasticsearch.elasticsearch_index
        )
        super(ElasticsearchTestCase, self).tearDown()

    def health_check(self):
        """Block (up to 1s) until the cluster reports at least yellow."""
        self.connection.cluster.health(
            wait_for_status='yellow',
            request_timeout=1
        )

    def get_tuned_config(self, sources, extra_values=None):
        """Build a configman config from one or more required-config sources.

        :arg sources: a single source or list/tuple of sources exposing
            ``get_required_config()``.
        :arg extra_values: optional dict of value overrides applied on top
            of DEFAULT_VALUES.
        :returns: the resolved configman configuration object.
        """
        if not isinstance(sources, (list, tuple)):
            sources = [sources]

        mock_logging = mock.Mock()

        config_definitions = []
        for source in sources:
            conf = source.get_required_config()
            conf.add_option('logger', default=mock_logging)
            config_definitions.append(conf)

        values_source = DEFAULT_VALUES.copy()
        values_source.update({'logger': mock_logging})
        if extra_values:
            values_source.update(extra_values)

        config_manager = ConfigurationManager(
            config_definitions,
            app_name='testapp',
            app_version='1.0',
            app_description='Elasticsearch integration tests',
            values_source_list=[environment, values_source],
            argv_source=[],
        )

        return config_manager.get_config()

    def get_base_config(self, es_index=None):
        """Return the standard ES test config, optionally overriding the
        crash index name with *es_index*."""
        extra_values = None
        if es_index:
            extra_values = {
                'resource.elasticsearch.elasticsearch_index': es_index
            }

        return self.get_tuned_config(
            ElasticsearchConfig,
            extra_values=extra_values
        )

    def index_super_search_fields(self, fields=None):
        """Bulk-index the supersearch field definitions.

        :arg fields: mapping of field name -> field document; defaults to
            the module-level SUPERSEARCH_FIELDS fixture.
        """
        if fields is None:
            fields = SUPERSEARCH_FIELDS

        es_index = self.config.elasticsearch.elasticsearch_default_index

        actions = []
        # .items() instead of the Python-2-only .iteritems() so this works
        # under both Python 2 and Python 3.
        for name, field in fields.items():
            action = {
                '_index': es_index,
                '_type': 'supersearch_fields',
                '_id': name,
                '_source': field,
            }
            actions.append(action)

        bulk(
            client=self.connection,
            actions=actions,
        )
        # Refresh so the documents are immediately searchable.
        self.index_client.refresh(index=[es_index])

    def index_crash(self, processed_crash, raw_crash=None, crash_id=None):
        """Index a single crash document and return its Elasticsearch id.

        A random crash_id is generated when none is supplied.
        """
        if crash_id is None:
            crash_id = str(uuid.UUID(int=random.getrandbits(128)))

        if raw_crash is None:
            raw_crash = {}

        doc = {
            'crash_id': crash_id,
            'processed_crash': processed_crash,
            'raw_crash': raw_crash,
        }
        res = self.connection.index(
            index=self.config.elasticsearch.elasticsearch_index,
            doc_type=self.config.elasticsearch.elasticsearch_doctype,
            id=crash_id,
            body=doc,
        )
        return res['_id']

    def index_many_crashes(
        self, number, processed_crash=None, raw_crash=None, loop_field=None
    ):
        """Bulk-index *number* crash documents with random crash ids.

        :arg loop_field: optional processed_crash key whose value is a
            %-format template; it is interpolated with the loop index so
            each document gets a distinct value.
        """
        if processed_crash is None:
            processed_crash = {}

        if raw_crash is None:
            raw_crash = {}

        actions = []
        for i in range(number):
            crash_id = str(uuid.UUID(int=random.getrandbits(128)))

            if loop_field is not None:
                processed_copy = processed_crash.copy()
                processed_copy[loop_field] = processed_crash[loop_field] % i
            else:
                processed_copy = processed_crash

            doc = {
                'crash_id': crash_id,
                'processed_crash': processed_copy,
                'raw_crash': raw_crash,
            }
            action = {
                '_index': self.config.elasticsearch.elasticsearch_index,
                '_type': self.config.elasticsearch.elasticsearch_doctype,
                '_id': crash_id,
                '_source': doc,
            }
            actions.append(action)

        bulk(
            client=self.connection,
            actions=actions,
        )
        self.refresh_index()

    def refresh_index(self, es_index=None):
        """Refresh *es_index* (default: the crash index) so fresh documents
        become visible to searches."""
        self.index_client.refresh(
            index=es_index or self.config.elasticsearch.elasticsearch_index
        )
| mpl-2.0 |
crossbridge-community/avmplus | test/performance/runtests.py | 5 | 53768 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import os, sys, getopt, datetime, pipes, glob, itertools,socket
import tempfile, string, re, platform, traceback
import subprocess, math, types
from os.path import *
from os import getcwd,environ,walk
from datetime import datetime
from glob import glob
from sys import argv, exit
from getopt import getopt
from itertools import count
from time import time
# add parent dir to python module search path
sys.path.append('..')
from util.convertAcceptanceToJunit import *
try:
from util.runtestBase import RuntestBase
from util.runtestUtils import *
except ImportError:
print("Import error. Please make sure that the test/acceptance/util directory has been deleted.")
print(" (directory has been moved to test/util).")
# Constants
DEFAULT_TRUNCATE_LEN = 6 # default len of number fields when displaying results
class PerformanceRuntest(RuntestBase):
avm2 = ''
avmname = ''
avmDefaultName = 'avm'
avm2DefaultName = 'avm2'
besttime = 'best'
besttime2 = 'best2'
avm2name = ''
currentDir = ''
displayMetrics = []
iterations = 1
vmname = 'unknown' # name sent to socketserver
memory = False
avmrevision = ''
avm2version = ''
avm2revision = ''
detail = False
fullpath = False
raw = False
repo = ''
vmargs2 = ''
optimize = True
perfm = False
tmpfile = None
osName = ''
logresults = False
logConfigAppend = ''
socketlogFile = None
socketlogFailure = False
serverHost = '10.60.48.47'
serverPort = 1188
finalexitcode = 0
# testData structure:
# { testName : { metric : { results1/2 : [], best1/2 : num, avg1/2 : num, spdup : num }}}
testData = {} # dict that stores all test results and calculations
csvAppend = False
metricInfo = {} # dict that holds info on each metric run
currentMetric = ''
indexFile = '' # indexFile that is used to compute normalized results
testIndexDict = {} # dict used to store test indexes
saveIndex = False # save results from avm1 to saveIndexFile
score = False # compute scores ?
# score1 & 2 hold the geometric mean of all tests run. Means are kept seperate for each
# metric, so the dictionary keeps a key for each metric:
# e.g: {'metric' : {'score':runningScore, 'count':count}, 'v8' : {'score':3.14, 'count':1} }
score1 = {}
score2 = {}
# formatting vars
testFieldLen = 27 # field length for test name and path
resultWidth = 8 # width of result columns
# dict for storing aot compile times
aot_compile_times={}
# Index file header
testIndexHeaderString = '''
# The testindex file contains a list of test results that are used to normalize
# all current results using these provided values. This enables us to equally
# weight the results of each test irrespective of the actual test runtime.
# In order to simplify the importing and processing of this file, the values are
# kept in a python dictionary which MUST be named testIndexDict
# The dictionary format is:
# 'testName':{'metric':value}
'''
    def __init__(self):
        """Initialize the performance runner via the RuntestBase constructor."""
        RuntestBase.__init__(self)
def __str__(self):
return 'PerformanceRuntest'
    def run(self):
        """Top-level driver: parse config/options, build the test list,
        execute all tests and emit the requested reports (csv, score,
        index file, junit)."""
        self.setEnvironVars()
        self.loadPropertiesFile()
        self.setOptions()
        self.parseOptions()
        self.altsearchpath='../../other-licenses/test/performance/'
        self.setTimestamp()
        self.checkPath(['avm2'])
        self.determineOS()

        # Load the root testconfig file
        self.settings, self.directives = self.parseTestConfig(self.testconfig)

        self.tests = self.getTestsList(self.args)

        # --rebuildtests mode: recompile everything and stop.
        if self.rebuildtests:
            self.rebuildTests()
            exit()

        # Load root .asc_args and .java_args files
        self.parseRootConfigFiles()

        self.loadMetricInfo()
        self.preProcessTests()
        self.printHeader()
        self.runTests(self.tests)

        # Optional output/report stages, each gated by its CLI flag.
        if self.csv:
            self.outputCsvToFile()
        if self.score:
            self.printScoreSummary()
        if self.saveIndex:
            self.outputTestIndexFile()
        if self.junitlog:
            outfile=convertPerformanceToJunit(self.junitlog+'.txt',self.junitlog+'.xml',self.junitlogname)
            print("wrote results in junit format to %s" % outfile)
        #self.cleanup()
    def getTestsList(self, args):
        '''If an index file is being used, we only run the files in the index list'''
        # when --saveIndex is specified along with --index, run the specified files
        # and merge it with the --index values
        if self.indexFile and not self.saveIndex:
            indexTests = sorted(self.testIndexDict.keys())
            if args[0] == '.':
                # '.' means "run everything": with an index file that is
                # exactly the set of indexed tests.
                return indexTests
            else:
                # only run the intersection (&) of tests in indexTests and
                # the tests matched by the command-line args
                testsToRun = list(set(indexTests) &
                                  set(RuntestBase.getTestsList(self, args)))
                if not testsToRun:
                    exit('There are no tests in the indexfile %s that match %s'
                         % (self.indexFile, args))
                return testsToRun
        # No index file in play: defer entirely to the base implementation.
        return RuntestBase.getTestsList(self, args)
def setEnvironVars(self):
RuntestBase.setEnvironVars(self)
if 'AVM2' in environ:
self.avm2 = environ['AVM2'].strip()
if 'VMARGS2' in environ:
self.vmargs2 = environ['VMARGS2'].strip()
    def usage(self, c):
        """Print the base usage text plus performance-specific options,
        then exit with status code *c*."""
        RuntestBase.usage(self, c)
        print(" -S --avm2 second avmplus command to use")
        print(" --avmname nickname for avm to use as column header")
        print(" --avm2name nickname for avm2 to use as column header")
        print(" --detail display results in 'old-style' format")
        print(" --raw output all raw test values")
        print(" -i --iterations number of times to repeat test")
        print(" -l --log logs results to a file")
        print(" -k --socketlog logs results to a socket server")
        print(" -r --runtime name of the runtime VM used, including switch info eg. TTVMi (tamarin-tracing interp)")
        print(" -m --memory logs the high water memory mark")
        print(" --metrics= display specified metrics: either a comma-separated list of")
        print(" metrics names (e.g. v8), or the keyword all.")
        print(" --vmversion specify vmversion e.g. 502, use this if cannot be calculated from executable")
        print(" --vm2version specify version of avm2")
        print(" --vmargs2 args to pass to avm2, if not specified --vmargs will be used")
        print(" --nooptimize do not optimize files when compiling")
        print(" --perfm parse the perfm results from avm")
        print(" --csv= also output to csv file, filename required")
        print(" --csvappend append to csv file instead of overwriting")
        print(" --score compute and print geometric mean of scores")
        print(" --index= index file to use (must end with .py)")
        print(" --saveindex= save results to given index file name")
        print(" --fullpath print out full path for each test")
        print(" --repo= repository url (used when logging to performance db)")
        print(" --logConfigAppend= string to append to the config string that is logged to the database along with vmargs")
        exit(c)
    def setOptions(self):
        """Extend the base getopt short/long option sets with the
        performance-runner flags parsed in parseOptions()."""
        RuntestBase.setOptions(self)
        self.options += 'S:i:lkr:mp'
        self.longOptions.extend(['avm2=','avmname=','avm2name=','iterations=','log=','socketlog',
                                 'runtime=','memory','metrics=','larger','vmversion=', 'vm2version=',
                                 'vmargs2=','nooptimize', 'score', 'saveindex=', 'index=',
                                 'perfm','csv=', 'csvappend','prettyprint', 'detail', 'raw',
                                 'fullpath', 'repo=', 'logConfigAppend='])
def parseOptions(self):
opts = RuntestBase.parseOptions(self)
for o, v in opts:
if o in ('-S', '--avm2'):
self.avm2 = v
elif o in ('--avmname',):
self.avmname = v
elif o in ('--avm2name',):
self.avm2name = v
elif o in ('-i', '--iterations'):
try:
self.iterations = int(v)
except ValueError:
print('Incorrect iterations value: %s\n' % v)
self.usage(2)
elif o in ('-l','--log'):
self.logFileType='log'
self.createOutputFile()
elif o in ('-k', '--socketlog'):
self.logresults = True
elif o in ('-r', '--runtime'):
self.vmname = v
elif o in ('-m', '--memory'):
self.memory = True
elif o in ('--metrics'):
self.displayMetrics = v.strip().lower().split(',')
if 'memory' in self.displayMetrics:
self.memory = True
del self.displayMetrics[self.displayMetrics.index('memory')]
elif o in ('--vmversion',):
self.avmrevision = self.avmversion = v
elif o in ('--vm2version',):
self.avm2revision = self.avm2version = v
elif o in ('--vmargs2',):
self.vmargs2 = v
elif o in ('--nooptimize',):
self.optimize = False
elif o in ('--perfm',):
self.perfm = True
self.resultWidth = 8 # perfm results can be pretty large
elif o in ('--csv',):
self.csv = True
self.csvfile = v
elif o in ('--csvappend',):
self.csvAppend = True
elif o in ('--score',):
self.score = True
elif o in ('--detail',):
self.detail = True
elif o in ('--raw',):
self.raw = True
elif o in ('--index',):
self.loadIndexFile(v)
elif o in ('--saveindex',):
self.saveIndex = True
self.saveIndexFile = v
elif o in ('--fullpath',):
self.fullpath = True
elif o in ('--repo',):
self.repo = v
elif o in ('--logConfigAppend',):
self.logConfigAppend = v
self.avmname = self.avmname or self.avmDefaultName
self.avm2name = self.avm2name or self.avm2DefaultName
    def loadIndexFile(self, indexFile):
        """Load the normalization index file (a python module defining
        testIndexDict) and store the dict on the instance."""
        # The indexFile contains values used to normalize
        # the test run results. For simplicity it is a python
        # file that defines a single dictionary: testIndexDict
        # That dictionary is dynamically loaded here and then assigned
        # to the classvar of the same name
        self.indexFile = indexFile[:-3] if indexFile.endswith('.py') else indexFile
        try:
            # NOTE(review): exec of a from-import relies on the name landing
            # in the function's local namespace — Python-2-only behavior;
            # confirm before running this under Python 3.
            exec('from %s import testIndexDict' % self.indexFile)
            self.testIndexDict = testIndexDict
        except ImportError:
            # TODO: friendlyfy this error message
            print('Error attempting to import %s:' % self.indexFile)
            raise
    def compile_test(self, as_file):
        """Compile *as_file* with asc (importing shell.abc), print the
        compiler output, and — when an AOT SDK is configured — AOT-compile
        the resulting .abc while timing it."""
        if not isfile(self.shellabc):
            exit("ERROR: shell.abc %s does not exist, SHELLLABC environment variable or --shellabc must be set to shell.abc" % self.shellabc)
        args = []
        args.append('-import %s' % self.shellabc)
        if self.optimize:
            args.append('-optimize -AS3')
        debugoutput = []
        RuntestBase.compile_test(self, as_file, args, debugoutput)
        self.printOutput(None, debugoutput)
        if self.aotsdk and self.aotout:
            # Only AOT-compile when the asc step actually produced an .abc.
            if isfile(splitext(as_file)[0] + ".abc"):
                startTime=time()
                RuntestBase.compile_aot(self, splitext(as_file)[0] + ".abc")
                # Record wall-clock AOT compile time per source file.
                self.aot_compile_times[as_file]=time()-startTime
    def socketlog(self, msg):
        """Append *msg* to a local socketlog file and send it to the
        performance socket server; on the first send failure, disable
        further sends and mark the run as failed."""
        if not self.socketlogFile:
            # Pick a unique local filename based on the avm version.
            file="socketlog-%s.txt" % self.avmversion
            ctr=0
            while os.path.exists(file):
                ctr += 1
                file = "socketlog-%s-%s.txt" % (self.avmversion,ctr)
            self.socketlogFile=file
        # NOTE(review): file handle is left for the GC to close.
        open(self.socketlogFile,'a').write(msg)
        if self.socketlogFailure == False:
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a TCP socket
                s.settimeout(10) # set socket timeout to 10s
                s.connect((self.serverHost, self.serverPort)) # connect to server on the port
                s.send("%s;exit\r\n" % msg) # send the data
                data = s.recv(1024)
                #print('Sent: %s' % msg)
                #print('Received: %s \n\n' % data)
                #s.shutdown(SHUT_RDWR)
                s.close()
            except :
                # Bare except: any failure (including timeout) downgrades the
                # run to WARNINGS and stops further socket logging.
                print("ERROR: Socket error occured:")
                print(sys.exc_info())
                print('buildbot_status: WARNINGS')
                self.socketlogFailure = True
                self.finalexitcode = 1
    def loadMetricInfo(self):
        '''load metric information from metricinfo.py'''
        try:
            from metricinfo import metric_info
        except ImportError:
            print('Error loading metricinfo.py file.')
            raise
        # verify that the loaded metric_info dictionary is valid
        for metric in metric_info:
            if 'best' not in metric_info[metric]:
                print('\nWarning: metricinfo.py - the %s metric does not have a "best" key defined - defaulting to min\n' % metric)
            elif not isinstance(metric_info[metric]['best'],(types.FunctionType,types.BuiltinFunctionType)):
                print('\nWarning: metricinfo.py - the "best" value for the %s metric must be a function - defaulting to min' % metric)
            # if the metric does not have a largerIsFaster value defined, default it to False
            if 'largerIsFaster' not in metric_info[metric]:
                metric_info[metric]['largerIsFaster'] = False
        self.metricInfo = metric_info
    def preProcessTests(self):
        '''Code that must be executed before beginning a testrun:
        builds the config string used for server-side logging, validates the
        vm executables, and fills in vm version/revision info.
        '''
        if self.logresults:
            # determine current config string for socketlog
            tmpvmargs = self.vmargs.replace("-AOTSIZE", "")   # This is a fake vmarg that is used for reporting filesize, do not log to db
            self.log_config = "%s" % tmpvmargs.replace(" ", "")
            self.log_config = "%s" % self.log_config.replace("\"", "")
            # strip the -memlimit flag and everything after it from the config key
            if self.log_config.find("-memlimit")>-1:
                self.log_config=self.log_config[0:self.log_config.find("-memlimit")]
            self.log_config = self.log_config.replace("-memstats","")
            self.log_config += self.logConfigAppend
        if not self.aotsdk:
            # AOT runs execute precompiled binaries, so the avm shell is only required otherwise
            self.checkExecutable(self.avm, 'AVM environment variable or --avm must be set to avmplus')
        if not self.avmversion:
            self.avmversion = self.getAvmVersion(self.avm)
        if not self.avmrevision:
            self.avmrevision = self.getAvmRevision(self.avmversion)
        if self.avm2:
            self.checkExecutable(self.avm2, '--avm2 must be set to avmplus')
            if not self.avm2version:
                self.avm2version = self.getAvmVersion(self.avm2)
            if not self.avm2revision:
                self.avm2revision = self.getAvmRevision(self.avm2version)
        else: # only one avm being run
            # widen the test-name column when there is no second result column set
            self.testFieldLen = 50
    def printHeader(self):
        '''Print run info and headers.

        The column layout depends on three things: whether a second vm is
        being compared, whether multiple iterations are run, and which
        python version is available (str.format needs >= 2.6).
        '''
        self.js_print('Executing %d test(s)' % len(self.tests), overrideQuiet=True)
        self.js_print("%s: %s %s version: %s" % (self.avmname, self.avm, self.vmargs, self.avmversion))
        if self.avm2:
            self.js_print("%s: %s %s version: %s" % (self.avm2name, self.avm2, self.vmargs2, self.avm2version))
        self.avmDefaultName += ':'+self.avmrevision
        self.js_print('iterations: %s' % self.iterations)
        if self.indexFile:
            self.js_print('index mode enabled, values of -1 indicate that no index value is present for that test')
        if self.avm2:
            self.avm2DefaultName += ':'+self.avm2revision
            if self.iterations == 1:
                self.js_print('\n{0:>{1}}{2:>{3}}'.format(self.avmname, self.testFieldLen+self.resultWidth-2,
                                                          self.avm2name, self.resultWidth+1))
                self.js_print('%-*s %5s %7s %9s\n' % (self.testFieldLen, 'test', 'avg',
                                                      'avg', '%diff'))
            else: # multiple iterations
                if self.detail:
                    # Original old-school header - deprecated
                    self.js_print('\n%-*s %-33s %-33s' % (self.testFieldLen, '', self.avmname, self.avm2name))
                    self.js_print('%-*s %7s :%7s %7s %6s %7s :%7s %7s %6s %7s %8s' % (self.testFieldLen, 'test', 'min','max','avg','stdev','min','max','avg','stdev','%diff','sig '))
                    self.js_print('%*s --------------------------------- --------------------------------- ----- --------' % (self.testFieldLen, '') )
                elif pythonVersion26():
                    # python >= 2.6
                    self.js_print('')
                    self.js_print('{0:>{1}}{2:>{3}}'.format(self.avmname,self.testFieldLen+self.resultWidth*2,
                                                            self.avm2name, self.resultWidth*2))
                    self.js_print('{0:<{testwidth}}{1:>{rw}}{2:>{rw}}{3:>{rw}}{4:>{rw}}{5:>{rw}}{6:>{rw}}'.
                                  format('test', 'best', 'avg', 'best', 'avg', '%dBst', '%dAvg',
                                         testwidth=self.testFieldLen, rw=self.resultWidth))
                else:
                    # python <= 2.5
                    self.js_print('\n%-*s %-*s %-*s' % (self.testFieldLen+self.resultWidth-4, '',
                                                        self.resultWidth*2, self.avmname,
                                                        self.resultWidth*2, self.avm2name))
                    self.js_print('%-*s %*s %*s %*s %*s %*s %*s' % (self.testFieldLen, 'test',
                                                                    self.resultWidth, 'best',
                                                                    self.resultWidth, 'avg',
                                                                    self.resultWidth, 'best',
                                                                    self.resultWidth, 'avg',
                                                                    self.resultWidth, '%dBst',
                                                                    self.resultWidth, '%dAvg'))
        else: # only one avm
            if (self.iterations>1):
                # NOTE(review): self.besttime is used here as a column label -
                # presumably it holds a string such as 'best'; confirm upstream
                self.js_print(('\n\n%-*s %6s %7s %12s\n') % (self.testFieldLen, 'test',
                                                             self.besttime,
                                                             'avg','95%_conf'))
            else:
                self.js_print("\n\n%-*s %7s \n" % (self.testFieldLen, "test", "result"))
def runTests(self, testList):
testnum = len(testList)
for t in testList:
testnum -= 1
o = self.runTest((t, testnum))
def parseMemHigh(self, line):
memoryhigh = 0
tokens=line.rsplit()
if len(tokens)>4:
_mem=tokens[3]
if _mem.startswith('('):
_mem=_mem[1:]
if _mem.endswith(')'):
_mem=_mem[:-1]
if _mem.endswith('M'):
val=float(_mem[:-1])*1024
else:
val=float(_mem[:-1])
if val>memoryhigh:
memoryhigh=val
return memoryhigh
    def loadTestSettings(self, dir, testname):
        '''Build the effective settings and include list for one test.

        Settings come from the global testconfig (self.settings, whose keys
        are regular expressions matched against the full testname) and are
        then overridden by a per-directory testconfig file if one exists.
        Returns (settings_dict, includes_list).
        '''
        settings = {}
        includes = self.includes #list
        # get settings for this test (from main testconfig file loaded into self.settings)
        for k in self.settings.keys():
            # the config key is a regex that must match the whole testname
            if re.search('^'+k+'$', testname):
                for k2 in self.settings[k].keys():
                    if k2 in settings:
                        settings[k2].update(self.settings[k][k2])
                    else:
                        # copy so later updates do not mutate the global config
                        settings[k2] = self.settings[k][k2].copy()
        if isfile(join(dir,self.testconfig)):
            localIncludes, localSettings = self.parseTestConfig(dir)
            # have a local testconfig, so we create a copy of the global settings to not overwrite
            includes = list(self.includes) #copy list - don't use reference
            includes.extend(localIncludes)
            if testname in localSettings:
                settings.update(localSettings[testname])
        return settings, includes
    def parsePerfTestOutput(self, output, resultDict):
        '''Parse the given lines of output for test results.

        Results are appended into resultDict keyed by metric name, e.g.
        {'time': [1, 2, 3]}.  Three sources are handled: memory-high lines
        (when self.memory), vprof/perfm counters (when self.perfm), and
        generic "metric <name> <value>" lines.
        '''
        if self.debug:
            print(output)
        if self.memory:
            memoryhigh = 0
            for line in output:
                if '[mem]' in line and 'mmgc' in line:
                    tempmem = self.parseMemHigh(line)
                    if tempmem > memoryhigh:
                        memoryhigh = tempmem
            resultDict.setdefault('memory', []).append(memoryhigh)
            # only display memory unless user is asking for other metrics
            if not self.displayMetrics:
                return
        if self.perfm:
            # These metrics are NOT prefaced with the metric keyword
            for line in output:
                # NOTE(review): this indexes [-2] on every line before any match
                # test - a line with fewer than two space-separated tokens would
                # raise IndexError here; presumably perfm output never does
                result = line.strip().split(' ')[-2]
                if 'verify & IR gen' in line:
                    resultDict.setdefault('vprof-verify-time', []).append(int(result))
                elif 'code ' in line:
                    resultDict.setdefault('vprof-code-size', []).append(int(result))
                elif 'compile ' in line:
                    resultDict.setdefault('vprof-compile-time', []).append(int(result))
                elif ('IR-bytes' in line) or ('mir bytes' in line):
                    resultDict.setdefault('vprof-ir-bytes', []).append(int(result))
                elif ('IR ' in line) or ('mir ' in line): #note trailing space
                    resultDict.setdefault('vprof-ir-time', []).append(int(result))
                    resultDict.setdefault('vprof-count', []).append(int(line.strip().split(' ')[-1]))
        # get all other metrics displayed
        for line in output:
            # If the testcase failed validation then stop parsing
            if 'validation failed' in line.lower():
                break
            # results must have the form of 'metric metric_name value'
            if 'metric' in line:
                rl=line.rsplit()
                if self.displayMetrics and 'all' not in self.displayMetrics:
                    # need to check which metrics to display
                    if rl[1].strip() not in self.displayMetrics:
                        continue
                if len(rl)>2:
                    # a '.' in the value means it should be stored as a float
                    if '.' in rl[2]:
                        resultDict.setdefault(rl[1], []).append(float(rl[2]))
                    else:
                        resultDict.setdefault(rl[1], []).append(int(rl[2]))
    def calculateSpeedup(self, testName, resultDict, resultDict2):
        '''calculate speed diff between vms
        stores all information into self.testData

        For each metric: optionally converts raw values into index values,
        records the raw result lists, the best and average result for each
        vm, logs to the results server, updates the running score, and (when
        a second vm ran) the best/avg percentage speedups.
        '''
        # TODO cpeyer: I'm still not certain that the correct thing to do re: index is to
        #   immediately change the raw values to indexed values
        # testData structure:
        # { testName : { metric : { results1/2 : [], best1/2 : num, avg1/2 : num, spdup : num }}}
        # calc values for each metric in resultDict and resultDic2 if defined
        if resultDict2:
            # it's possible that there are different metrics being reported by each vm
            # only calculate the common metrics (intersection of the dict keys)
            metrics = set(resultDict.keys()) & set(resultDict2.keys())
        else:
            metrics = resultDict.keys()
        for metric in metrics:
            # if using the index file, compute the indexes and use those as results instead of raw values
            if self.indexFile and not self.saveIndex:
                resultDict[metric] = [self.computeIndex(testName, metric, x) for x in resultDict[metric]]
            # Store the results
            self.testData.setdefault(testName, {}).setdefault(metric, {}).setdefault('results1', []).extend(resultDict[metric])
            # calculate the best result
            try:
                r1 = self.testData[testName][metric]['best1'] = self.metricInfo[metric]['best'](resultDict[metric])
            except KeyError:
                # metric is not defined in metricinfo, default to using min
                r1 = self.testData[testName][metric]['best1'] = min(resultDict[metric])
                # also add the metric to metricinfo
                self.metricInfo[metric] = {'best':min, 'largerIsFaster':False}
            a1 = self.testData[testName][metric]['avg1'] = mean(resultDict[metric])
            if self.logresults:
                # ship the vm1 result to the perf database via the socket logger
                self.socketlog("addresult2::%s::%s::%s::%0.1f::%s::%s::%s::%s::%s::%s::%s;" %
                               (testName, metric, r1, conf95(resultDict[metric]),
                                self.testData[testName][metric]['avg1'],
                                len(resultDict[metric]),
                                self.osName.upper(), self.log_config,
                                self.avmrevision, self.vmname, self.repo))
            if self.score:
                self.updateScore(self.score1, metric, r1)
            if resultDict2:
                if self.indexFile and not self.saveIndex:
                    resultDict2[metric] = [self.computeIndex(testName, metric, x) for x in resultDict2[metric]]
                # Store the results
                self.testData.setdefault(testName, {}).setdefault(metric, {}).setdefault('results2', []).extend(resultDict2[metric])
                # calculate the best result (metricInfo[metric] is guaranteed
                # to exist here - the vm1 path above defaulted it if missing)
                r2 = self.testData[testName][metric]['best2'] = self.metricInfo[metric]['best'](resultDict2[metric])
                a2 = self.testData[testName][metric]['avg2'] = mean(resultDict2[metric])
                if self.score:
                    self.updateScore(self.score2, metric, r2)
                # calculate speedup btwn vms
                # if the best value is the max value, reverse the sign of the spdup
                sign = -1 if self.metricInfo[metric]['largerIsFaster'] else 1
                self.testData[testName][metric]['spdup'] = 0 if r1 == 0 else sign * float(r1-r2)/r1 * 100.0
                self.testData[testName][metric]['avg_spdup'] = 0 if a1 == 0 else sign * float(a1-a2)/a1 * 100.0
def checkForMetricChange(self, metric):
''' If the test metric has changed, print out a line indicating so.
Function only used when displaying results sorted by metric.
(Or only a single metric is being displayed)
'''
if self.currentMetric != metric:
self.currentMetric = metric
self.js_print('Metric: %s%s %s' % (self.metricInfo[metric].get('name',metric),
' (indexed)' if self.indexFile else '',
self.metricInfo[metric].get('desc','')))
def getBestResult(self, metric, resultList):
return max(resultList) if self.metricInfo[metric]['largerIsFaster'] else min(resultList)
    def getSigString(self, sig, spdup):
        '''generate a string of +/- to give a quick visual representation of the perf difference

        sig is the speedup expressed in combined relative-std-dev units and
        spdup is the raw %diff: a doubled sign means a significant change
        (|sig| > 2 and |spdup| > 5%), a single sign a borderline one
        (|sig| > 1), and blanks mean the difference is in the noise.
        '''
        return '--' if (sig < -2.0 and spdup < -5.0) else '- ' if sig < -1.0 \
            else '++' if (sig > 2.0 and spdup > 5.0) else '+ ' if sig > 1.0 else '  '
def computeIndex(self, testname, metric, value):
'Compute the index value for given testname and index from self.testIndexDict'
indexValue = self.testIndexDict.get(testname, {}).get(metric)
if indexValue:
return (float(value)/indexValue)
else:
return -1
def truncateDescField(self, desc):
'Return desc truncated to self.testFieldLen'
if desc.endswith('.as'):
desc = desc[:-3]
return desc if len(desc) <= self.testFieldLen else desc[(len(desc) - self.testFieldLen):]
def updateScore(self, scoreDict, metric, result):
'Update the given score dict with newest result'
if not metric in scoreDict:
scoreDict[metric] = {'score':float(result), 'count':1}
else:
scoreDict[metric] = {'score':scoreDict[metric]['score'] * float(result),
'count':scoreDict[metric]['count']+1}
def printScoreSummary(self):
print('Score for %s:' % (self.avmname))
for k,v in self.score1.iteritems():
print(' %s = %s' % (k, str(pow(v['score'],1.0/v['count']))))
if self.score2:
print('Score for %s' % (self.avm2name,))
for k,v in self.score2.iteritems():
print(' %s = %s' % (k, str(pow(v['score'],1.0/v['count']))))
'''
def formatResult(self, result, truncateLen=DEFAULT_TRUNCATE_LEN, sigFigs = 1, metric = ''):
#Format the test result for display
# use currentMetric if no metric specified
metric = metric or self.currentMetric
if self.indexFile:
# automatically format as 2 sigfigs float unless its an int
if int(result) == result:
return int(result)
else:
return ('%% %s.%sf' % ((truncateLen, 2))) % result
if metric == 'memory':
return formatMemory(result)
else:
if int(result) == result or abs(result) > 10**(truncateLen-1):
return int(result)
else:
return ('%% %s.%sf' % ((truncateLen, sigFigs))) % result
# line below requires python >= 2.6
#return format(result, '%s.%sf' % (truncateLen, decimalPlaces))
'''
    def formatResult(self, result, truncateLen=DEFAULT_TRUNCATE_LEN, sigFigs = None, metric = ''):
        '''Format a single numeric test result for display.

        When sigFigs is not given it is inferred from the magnitude of the
        result (more decimals for smaller values).  Index-mode results are
        always shown with 2 decimals; the 'memory' metric is delegated to
        formatMemory().

        NOTE(review): negative results also take the sigFigs=3 branch since
        they compare < 1 - presumably only %diff values are ever negative.
        '''
        if sigFigs is None:
            # Infer suitable number of decimal places from result's magnitude
            if result < 1:
                sigFigs = 3
            elif result < 10:
                sigFigs = 2
            else:
                sigFigs = 1
        # use currentMetric if no metric specified
        metric = metric or self.currentMetric
        if self.indexFile:
            # index mode: always format as a float with 2 decimal places
            # (the format string built here is e.g. '% 10.2f')
            return ('%% %s.%sf' % ((truncateLen, 2))) % result
        if metric == 'memory':
            return formatMemory(result)
        else:
            return ('%% %s.%sf' % ((truncateLen, sigFigs))) % result
        # line below requires python >= 2.6
        #return format(result, '%s.%sf' % (truncateLen, decimalPlaces))
def runTest(self, testAndNum):
'Run a singe performance testcase self.iterations times and print out results'
ast = testAndNum[0]
testName = ast
# strip off ./ as test is then treated differently in perf db
if testName[:2] == './':
testName = testName[2:]
if self.altsearchpath!=None and ast.startswith(self.altsearchpath):
testName = ast[len(self.altsearchpath):]
testnum = testAndNum[1]
if ast.startswith("./"):
ast=ast[2:]
dir =ast[0:ast.rfind('/')]
root,ext = splitext(ast)
tname = root[root.rfind('/')+1:]
abc = "%s.abc" % root
settings = self.get_test_settings(root)
if '.*' in settings and 'skip' in settings['.*']:
self.verbose_print(' skipping %s' % testName)
self.allskips += 1
return
if self.forcerebuild and isfile(abc):
os.unlink(abc)
if isfile(abc) and getmtime(ast)>getmtime(abc):
self.verbose_print("%s has been modified, recompiling" % ast)
os.unlink(abc)
if not isfile(abc):
self.compile_test(ast)
if not isfile(abc):
self.js_print("compile FAILED!, file not found " + abc)
# determine current config
config = "%s" % self.vmargs.replace(" ", "")
config = "%s" % config.replace("\"", "")
if config.find("-memlimit")>-1:
config=config[0:config.find("-memlimit")]
# results are stored in a dictionary using the metric as key
# e.g.: {'time':[1,2,3,4]}
resultsDict1 = {}
resultsDict2 = {}
if self.memory and self.vmargs.find("-memstats")==-1:
self.vmargs="%s -memstats" % self.vmargs
if self.memory and len(self.vmargs2)>0 and self.vmargs2.find("-memstats")==-1:
self.vmargs2="%s -memstats" % self.vmargs2
scriptargs=[['','']]
if os.path.exists("%s.script_args" % testName):
lines=open("%s.script_args" % testName).read().split('\n')
scriptargs=[]
for line in lines:
if len(line.strip())==0 or line.strip().startswith('#'):
continue
tokens=line.split(',')
args=tokens[0]
if args.startswith('-- ')==False:
args='-- %s' % args
if len(tokens)>0:
desc=tokens[1]
else:
desc=args.replace(' ','_')
scriptargs.append([args,desc])
if len(scriptargs)==0:
scriptargs=['','']
for arg in scriptargs:
scriptArg=arg[0]
testNameDesc=testName+arg[1]
resultsDict1 = {}
resultsDict2 = {}
for i in range(self.iterations):
if self.aotsdk and self.aotout:
progname = testName.replace(".as", "")
progname = progname.replace("/", ".")
progpath = os.path.join(self.aotout, progname)
if not self.avm:
(f1,err,exitcode) = self.run_pipe(os.path.join(self.aotout, progname))
# print("about to execute: " + os.path.join(self.aotout, progname))
exitcode = 0 # hack!
elif self.avm: # AVM is set to a script that will handle SSH communications
cmd = "%s %s %s %s" % (self.avm, self.vmargs, abc, scriptArg)
(f1,err,exitcode) = self.run_pipe(cmd)
self.debug_print("%s" % (cmd))
self.debug_print(f1)
if testName in self.aot_compile_times:
f1.append('metric compile_time %.2f' % self.aot_compile_times[testName])
else:
(f1,err,exitcode) = self.run_pipe("%s %s %s %s" % (self.avm, self.vmargs, abc, scriptArg))
self.debug_print("%s %s %s %s" % (self.avm, self.vmargs, abc, scriptArg))
self.debug_print(f1)
if self.avm2:
(f2,err2,exitcode2) = self.run_pipe("%s %s %s %s" % (self.avm2, self.vmargs2 if self.vmargs2 else self.vmargs, abc, scriptArg))
self.debug_print("%s %s %s %s" % (self.avm2, self.vmargs2 if self.vmargs2 else self.vmargs, abc, scriptArg))
self.debug_print(f2)
try:
if exitcode!=0:
self.finalexitcode=1
self.js_print("%-50s %7s %s" % (testName,'Avm1 Error: Test Exited with exit code:', exitcode))
return
else:
self.parsePerfTestOutput(f1, resultsDict1)
if self.avm2:
if exitcode2!=0:
self.finalexitcode=1
self.js_print("%-50s %7s %s" % (testName,'Avm2 Error: Test Exited with exit code:', exitcode))
return
else:
self.parsePerfTestOutput(f2, resultsDict2)
except:
traceback.print_exc()
exit(-1)
# end for i in range(iterations)
if not self.validResultsDictionary('avm', resultsDict1, testName, f1):
return
if self.avm2 and not self.validResultsDictionary('avm2', resultsDict2, testName, f2):
return
# calculate best results and store to self.testData
self.calculateSpeedup(testNameDesc, resultsDict1, resultsDict2)
self.printTestResults(testNameDesc)
def validResultsDictionary(self, avmName, resultsDict, testName, output):
'''Make sure that the results dictionary has valid number of results for each metric
Return True if valid, False if not valid
'''
if not resultsDict:
self.js_print('%s : No metrics returned from test!' % testName)
self.js_print(' test output: %s' % [l for l in output])
return False
# check to make sure every metric has the right number of results
for metric in resultsDict.keys():
if len(resultsDict[metric]) != self.iterations:
self.js_print('%s : %s number of results for the %s metric is != # of iterations (%s)' %
(testName, avmName, metric, self.iterations))
return False
return True
def checkForDirChange(self, name):
# extract dir
try:
last_slash = name.rindex('/')+1
except ValueError:
last_slash = None
if last_slash:
dir = name[:last_slash]
if not self.fullpath:
name = ' '+name[last_slash:]
if dir and dir != self.currentDir:
self.js_print('Dir: %s' % dir)
self.currentDir = dir
return name
    def printTestResults(self, testName):
        '''Print the results for a single test.

        The layout branches on: comparing two vms or one, single vs multiple
        iterations, and whether the test reported one metric (results go on
        the test's own line) or several (the test name is printed alone and
        each metric gets an indented line).
        '''
        # Support two output modes:
        #  1. Sorted by test, each metric gets a seperate line after each testname (default)
        #  2. Sorted by metric. Output the results for a single metric (TODO: how do i determine? command switch?)
        #     and then when the entire testrun is finished, output results for all other metrics
        testData = self.testData
        # How many metrics are stored for this test?
        numMetrics = len(testData[testName])
        if numMetrics == 1:
            # only bound here; the numMetrics > 1 branches rebind it in their loops
            metric = list(testData[testName].keys())[0]
            # print out metric info if needed
            self.checkForMetricChange(metric)
        # Print out dir names and indent tests below
        desc = self.checkForDirChange(testName)
        if self.avm2:
            if self.iterations == 1:
                if numMetrics == 1:
                    self.printSingleIterationComparison(desc, testName, metric)
                else: # numMetrics > 1
                    self.js_print(desc)
                    for metric in testData[testName].keys():
                        self.printSingleIterationComparison('  %s' % self.metricInfo[metric].get('name',metric), testName, metric)
            else: # multiple iterations
                if numMetrics == 1:
                    self.printMultiIterationComparison(desc, testName, metric)
                else: # numMetrics > 1
                    self.js_print(desc)
                    for metric in testData[testName].keys():
                        self.printMultiIterationComparison('  %s' % self.metricInfo[metric].get('name',metric), testName, metric)
        else: # only one avm tested
            if self.iterations == 1:
                if numMetrics == 1:
                    self.js_print('%-*s %*s' % (self.testFieldLen, self.truncateDescField(desc),
                                                self.resultWidth, self.formatResult(testData[testName][metric]['best1'], metric=metric)))
                else: # numMetrics > 1
                    self.js_print(desc)
                    for metric in testData[testName].keys():
                        self.js_print('  %-*s %*s' % (self.testFieldLen-2, self.metricInfo[metric].get('name',metric),
                                                      self.resultWidth, self.formatResult(testData[testName][metric]['best1'], metric=metric)))
            else: # multiple iterations
                if numMetrics == 1:
                    # best, avg, 95% confidence interval, plus raw results when requested
                    self.js_print(('%-*s %*s %*s %4.1f%% %s') %
                                  (self.testFieldLen, self.truncateDescField(desc), self.resultWidth,
                                   self.formatResult(testData[testName][metric]['best1'], metric=metric),
                                   self.resultWidth, self.formatResult(testData[testName][metric]['avg1'], metric=metric),
                                   conf95(self.testData[testName][metric]['results1']),
                                   [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results1']] if self.raw else ''
                                   ))
                else: # numMetrics > 1
                    self.js_print(desc)
                    for metric in testData[testName].keys():
                        self.js_print(('  %-*s %*s %*s %4.1f%% %s') %
                                      (self.testFieldLen-2, self.metricInfo[metric].get('name',metric),
                                       self.resultWidth,
                                       self.formatResult(testData[testName][metric]['best1'], metric=metric),
                                       self.resultWidth, self.formatResult(testData[testName][metric]['avg1'], metric=metric),
                                       conf95(self.testData[testName][metric]['results1']),
                                       [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results1']] if self.raw else ''
                                       ))
        #else:
        #    self.js_print("%-*s %5s %s" % (self.testFieldLen, truncateTestname(testName),
        #                                   'no test result - test output: ',f1))
        #    self.finalexitcode=1
    def printSingleIterationComparison(self, descStr, testName, metric):
        '''Print output for single iteration when comparing 2 vms:
        best result for each vm plus the best/avg percentage speedups.
        Uses str.format on python >= 2.6, %-formatting otherwise.
        '''
        spdup = self.testData[testName][metric]['spdup']
        avg_spdup = self.testData[testName][metric]['avg_spdup']
        if pythonVersion26():
            self.js_print('{0:<{testwidth}}{1:>{rw}}{2:>{rw}}{3:>{rw}}{4:>{rw}}'.
                          format(self.truncateDescField(descStr),
                                 self.formatResult(self.testData[testName][metric]['best1'], metric=metric),
                                 self.formatResult(self.testData[testName][metric]['best2'], metric=metric),
                                 # 'percent' bypasses any metric-specific formatting (e.g. memory)
                                 self.formatResult(spdup, 4, 1, 'percent'),
                                 self.formatResult(avg_spdup, 4, 1, 'percent'),
                                 testwidth=self.testFieldLen, rw=self.resultWidth))
        else: # python <= 2.5
            self.js_print('%-*s %5s %7s %6.1f %6.1f' % (self.testFieldLen, self.truncateDescField(descStr),
                                                        self.formatResult(self.testData[testName][metric]['best1'], metric=metric),
                                                        self.formatResult(self.testData[testName][metric]['best2'], metric=metric),
                                                        spdup, avg_spdup))
    def printMultiIterationComparison(self, descStr, testName, metric):
        '''Print output for multiple iterations when comparing 2 vms:
        best/avg per vm, percentage speedups, and a +/- significance marker
        derived from the relative standard deviations.

        NOTE(review): uses the Python-2-only builtin cmp() in the
        ZeroDivisionError fallback - would need a replacement on Python 3.
        '''
        relStdDev1 = rel_std_dev(self.testData[testName][metric]['results1'])
        relStdDev2 = rel_std_dev(self.testData[testName][metric]['results2'])
        spdup = self.testData[testName][metric]['spdup']
        avg_spdup = self.testData[testName][metric]['avg_spdup']
        try:
            # significance: speedup expressed in units of the combined noise
            sig = spdup / (relStdDev1+relStdDev2)
        except ZeroDivisionError:
            # determine sig by %diff (spdup) only
            sig = cmp(spdup,0) * (3.0 if abs(spdup) > 5.0 else 2.0 if abs(spdup) > 1.0 else 0.0)
        sig_str = self.getSigString(sig, spdup)
        if self.detail:
            # deprecated old-school detailed layout: [min:max] avg ±stdev per vm
            self.js_print('%-*s [%7s :%7s] %7s ±%4.1f%% [%7s :%7s] %7s ±%4.1f%% %6.1f%% %6.1f %2s %s %s' %
                          (self.testFieldLen, self.truncateDescField(descStr),
                           self.formatResult(min(self.testData[testName][metric]['results1']), metric=metric),
                           self.formatResult(max(self.testData[testName][metric]['results1']), metric=metric),
                           self.formatResult(self.testData[testName][metric]['avg1'], metric=metric),
                           relStdDev1,
                           self.formatResult(min(self.testData[testName][metric]['results2']), metric=metric),
                           self.formatResult(max(self.testData[testName][metric]['results2']), metric=metric),
                           self.formatResult(self.testData[testName][metric]['avg2'], metric=metric),
                           relStdDev2,
                           spdup, avg_spdup,sig_str,
                           [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results1']] if self.raw else '',
                           [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results2']] if self.raw else ''
                           ))
        elif pythonVersion26():
            self.js_print('{0:<{testwidth}}{1:>{rw}}{2:>{rw}}{3:>{rw}}{4:>{rw}}{5:>{rw}}{6:>{rw}}{7:>3}{8}{9}'.
                          format(self.truncateDescField(descStr),
                                 self.formatResult(self.testData[testName][metric]['best1'], metric=metric),
                                 self.formatResult(self.testData[testName][metric]['avg1'], metric=metric),
                                 self.formatResult(self.testData[testName][metric]['best2'], metric=metric),
                                 self.formatResult(self.testData[testName][metric]['avg2'], metric=metric),
                                 self.formatResult(spdup, 4, 1, 'percent'),
                                 self.formatResult(avg_spdup, 4, 1, 'percent'),
                                 sig_str,
                                 [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results1']] if self.raw else '',
                                 [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results2']] if self.raw else '',
                                 testwidth=self.testFieldLen, rw=self.resultWidth))
        else:
            # python <= 2.5
            self.js_print('%-*s %*s %*s %*s %*s %*s %*s %2s %s %s' %
                          (self.testFieldLen, self.truncateDescField(descStr),
                           self.resultWidth, self.formatResult(self.testData[testName][metric]['best1'], metric=metric),
                           self.resultWidth, self.formatResult(self.testData[testName][metric]['avg1'], metric=metric),
                           self.resultWidth, self.formatResult(self.testData[testName][metric]['best2'], metric=metric),
                           self.resultWidth, self.formatResult(self.testData[testName][metric]['avg2'], metric=metric),
                           self.resultWidth, self.formatResult(spdup, 4, 2, 'percent'),
                           self.resultWidth, self.formatResult(avg_spdup, 4, 2, 'percent'),
                           sig_str,
                           [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results1']] if self.raw else '',
                           [self.formatResult(x, metric=metric) for x in self.testData[testName][metric]['results2']] if self.raw else ''
                           ))
def convertAvmOptionsDictToList(self):
'''Convert self.avmOptions to lists that can be used by csvwriter'''
avmOptionsHeader = []
avmOptions = []
avm2Options = []
# get all the keys from both option dictionaries
# uniquify the list so there are no duplicates using set
keys = sorted(set(self.avmOptionsDict.keys() + self.avm2OptionsDict.keys()))
for key in keys:
avmOptionsHeader.append(key)
avmOptions.append(self.avmOptionsDict.get(key, False))
avm2Options.append(self.avm2OptionsDict.get(key, False))
return avmOptionsHeader, avmOptions, avm2Options
def outputCsvToFile(self):
# testData structure:
# { testName : { metric : { results1/2 : [], best1/2 : num, avg1/2 : num, spdup : num }}}
import csv
try:
csvwriter = csv.writer(open(self.csvfile, 'a' if self.csvAppend else 'w'))
except IOError:
if self.csvfile != 'output.csv':
print('Error attempting to open %s. Saving to ./output.csv instead' % self.csvfile)
self.csvfile = 'output.csv'
self.outputCsvToFile()
else:
print('Error attempting to write to output.csv file - aborting.')
return
print('Writing out csv data to %s' % self.csvfile)
# TODO: generation of the options dict needs to be moved to runtestBase
# more work is needed to get that working there
# This here is proof of concept that needs to be fleshed out
self.avmOptionsDict = {
'avm' : self.avmname,
}
self.avm2OptionsDict = {
'avm' : self.avm2name,
}
avmOptionsHeader, avmOptions, avm2Options = self.convertAvmOptionsDictToList()
# write the header
csvwriter.writerow(avmOptionsHeader + ['testname', 'metric', 'metricunit', 'iteration', 'value'])
# Write out all the data contained in self.testData to the csv file
for testname, testDict in self.testData.iteritems():
for metric, metricDict in testDict.iteritems():
for iteration, value in enumerate(metricDict['results1']):
csvwriter.writerow(avmOptions + \
[testname, metric,
self.metricInfo[metric].get('unit', ''),
iteration+1, # use iteration+1 so that iteration values are not zero based
value])
if metricDict.get('results2'):
for iteration, value in enumerate(metricDict['results2']):
csvwriter.writerow(avm2Options + \
[testname, metric,
self.metricInfo[metric].get('unit', ''),
iteration+1, # use iteration+1 so that iteration values are not zero based
value])
def outputTestIndexFile(self):
'''write out a testIndex file'''
output = [self.testIndexHeaderString]
# update testIndexDict with newest results
for testname, testDict in self.testData.iteritems():
for metric, metricDict in testDict.iteritems():
self.testIndexDict.setdefault(testname, {}).update({metric:metricDict['best1']})
output.append('testIndexDict = ')
print('Saving values to index file: %s' % self.saveIndexFile)
try:
f = open(self.saveIndexFile, 'w')
except IOError:
print('Error attempting to open %s for write.' % self.saveIndexFile)
print('Aborting saving of index file.')
return
f.writelines(output)
import pprint
pprint.pprint(self.testIndexDict, f)
# Script entry point: constructing the runner executes the whole testrun;
# exit with the accumulated result code.
try:
    runtest = PerformanceRuntest()
    exit(runtest.finalexitcode)
except SystemExit:
    # let exit() codes propagate untouched
    raise
except TypeError:
    # This is the error thrown when ctrl-c'ing out of a testrun
    # NOTE(review): ctrl-c normally raises KeyboardInterrupt; presumably an
    # interrupted run surfaces here as a TypeError - confirm before relying on it
    print('\nKeyboard Interrupt')
    raise
except:
    print('Runtest Abnormal Exit')
    raise
| mpl-2.0 |
andnovar/kivy | kivy/adapters/adapter.py | 44 | 5525 | '''
Adapter
=======
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
An :class:`~kivy.adapters.adapter.Adapter` is a bridge between data and
an :class:`~kivy.uix.abstractview.AbstractView` or one of its subclasses, such
as a :class:`~kivy.uix.listview.ListView`.
The following arguments can be passed to the constructor to initialise the
corresponding properties:
* :attr:`~Adapter.data`: for any sort of data to be used in a view. For an
:class:`~kivy.adapters.adapter.Adapter`, data can be an object as well as a
list, dict, etc. For a :class:`~kivy.adapters.listadapter.ListAdapter`, data
should be a list. For a :class:`~kivy.adapters.dictadapter.DictAdapter`,
data should be a dict.
* :attr:`~Adapter.cls`: the class used to instantiate each list item view
instance (Use this or the template argument).
* :attr:`~Adapter.template`: a kv template to use to instantiate each list item
view instance (Use this or the cls argument).
* :attr:`~Adapter.args_converter`: a function used to transform the data items
in preparation for either a cls instantiation or a kv template
invocation. If no args_converter is provided, the data items are assumed
to be simple strings.
Please refer to the :mod:`~kivy.adapters` documentation for an overview of how
adapters are used.
'''
__all__ = ('Adapter', )
from kivy.event import EventDispatcher
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.adapters.args_converters import list_item_args_converter
from kivy.factory import Factory
from kivy.compat import string_types
class Adapter(EventDispatcher):
    '''An :class:`~kivy.adapters.adapter.Adapter` is a bridge between data and
    an :class:`~kivy.uix.abstractview.AbstractView` or one of its subclasses,
    such as a :class:`~kivy.uix.listview.ListView`.
    '''
    data = ObjectProperty(None)
    '''
    The data for which a view is to be constructed using either the cls or
    template provided, together with the args_converter provided or the default
    args_converter.
    In this base class, data is an ObjectProperty, so it could be used for a
    wide variety of single-view needs.
    Subclasses may override it in order to use another data type, such as a
    :class:`~kivy.properties.ListProperty` or
    :class:`~kivy.properties.DictProperty` as appropriate. For example, in a
    :class:`~.kivy.adapters.listadapter.ListAdapter`, data is a
    :class:`~kivy.properties.ListProperty`.
    :attr:`data` is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''
    cls = ObjectProperty(None)
    '''
    A class for instantiating a given view item (Use this or template). If this
    is not set and neither is the template, a :class:`~kivy.uix.label.Label`
    is used for the view item.
    :attr:`cls` is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''
    template = ObjectProperty(None)
    '''
    A kv template for instantiating a given view item (Use this or cls).
    :attr:`template` is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''
    args_converter = ObjectProperty(None)
    '''
    A function that prepares an args dict for the cls or kv template to build
    a view from a data item.
    If an args_converter is not provided, a default one is set that assumes
    simple content in the form of a list of strings.
    :attr:`args_converter` is an :class:`~kivy.properties.ObjectProperty` and
    defaults to None.
    '''
    def __init__(self, **kwargs):
        # validate constructor args before EventDispatcher consumes them:
        # data is mandatory, and exactly one of cls/template must be provided
        if 'data' not in kwargs:
            raise Exception('adapter: input must include data argument')
        if 'cls' in kwargs:
            if 'template' in kwargs:
                msg = 'adapter: cannot use cls and template at the same time'
                raise Exception(msg)
            elif not kwargs['cls']:
                raise Exception('adapter: a cls or template must be defined')
        else:
            if 'template' in kwargs:
                if not kwargs['template']:
                    msg = 'adapter: a cls or template must be defined'
                    raise Exception(msg)
            else:
                raise Exception('adapter: a cls or template must be defined')
        if 'args_converter' in kwargs:
            self.args_converter = kwargs['args_converter']
        else:
            # default assumes the data items are simple strings
            self.args_converter = list_item_args_converter
        super(Adapter, self).__init__(**kwargs)
    def bind_triggers_to_view(self, func):
        '''Have func called whenever the data property changes.'''
        self.bind(data=func)
    def get_data_item(self):
        '''Return the data for the single view this adapter manages
        (subclasses with multiple items override this).'''
        return self.data
    def get_cls(self):
        '''
        .. versionadded:: 1.9.0
        Returns the widget type specified by self.cls. If it is a
        string, the :class:`~kivy.factory.Factory` is queried to retrieve the
        widget class with the given name, otherwise it is returned directly.
        '''
        cls = self.cls
        if isinstance(cls, string_types):
            try:
                cls = getattr(Factory, cls)
            except AttributeError:
                raise AttributeError(
                    'Listadapter cls widget does not exist.')
        return cls
    def get_view(self, index):  # pragma: no cover
        '''Build and return a view instance for the data item, using cls if
        set, otherwise the kv template.'''
        item_args = self.args_converter(self.data)
        cls = self.get_cls()
        if cls:
            return cls(**item_args)
        else:
            return Builder.template(self.template, **item_args)
| mit |
dahlstrom-g/intellij-community | python/helpers/pydev/_pydev_imps/_pydev_BaseHTTPServer.py | 14 | 22563 | """HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import socket
from warnings import filterwarnings, catch_warnings
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
from _pydev_imps import _pydev_SocketServer as SocketServer
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
# MIME type sent alongside the default error body above.
DEFAULT_ERROR_CONTENT_TYPE = "text/html"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(SocketServer.TCPServer):
    """TCP server that records its fully-qualified name and port on bind."""

    allow_reuse_address = 1    # Seems to make sense in testing environment

    def server_bind(self):
        """Override server_bind to store the server name."""
        SocketServer.TCPServer.server_bind(self)
        sockname = self.socket.getsockname()
        self.server_name = socket.getfqdn(sockname[0])
        self.server_port = sockname[1]
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
    """HTTP request handler base class.
    The following explanation of HTTP serves to guide you through the
    code as well as to expose any misunderstandings I may have about
    HTTP (so you don't need to read the code to figure out I'm wrong
    :-).
    HTTP (HyperText Transfer Protocol) is an extensible protocol on
    top of a reliable stream transport (e.g. TCP/IP). The protocol
    recognizes three parts to a request:
    1. One line identifying the request type and path
    2. An optional set of RFC-822-style headers
    3. An optional data part
    The headers and data are separated by a blank line.
    The first line of the request has the form
    <command> <path> <version>
    where <command> is a (case-sensitive) keyword such as GET or POST,
    <path> is a string containing path information for the request,
    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
    <path> is encoded using the URL encoding scheme (using %xx to signify
    the ASCII character with hex code xx).
    The specification specifies that lines are separated by CRLF but
    for compatibility with the widest range of clients recommends
    servers also handle LF. Similarly, whitespace in the request line
    is treated sensibly (allowing multiple spaces between components
    and allowing trailing whitespace).
    Similarly, for output, lines ought to be separated by CRLF pairs
    but most clients grok LF characters just fine.
    If the first line of the request has the form
    <command> <path>
    (i.e. <version> is left out) then this is assumed to be an HTTP
    0.9 request; this form has no optional headers and data part and
    the reply consists of just the data.
    The reply form of the HTTP 1.x protocol again has three parts:
    1. One line giving the response code
    2. An optional set of RFC-822-style headers
    3. The data
    Again, the headers and data are separated by a blank line.
    The response code line has the form
    <version> <responsecode> <responsestring>
    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
    <responsecode> is a 3-digit response code indicating success or
    failure of the request, and <responsestring> is an optional
    human-readable string explaining what the response code means.
    This server parses the request and the headers, and then calls a
    function specific to the request type (<command>). Specifically,
    a request SPAM will be handled by a method do_SPAM(). If no
    such method exists the server sends an error response to the
    client. If it exists, it is called with no arguments:
    do_SPAM()
    Note that the request name is case sensitive (i.e. SPAM and spam
    are different requests).
    The various request details are stored in instance variables:
    - client_address is the client IP address in the form (host,
    port);
    - command, path and version are the broken-down request line;
    - headers is an instance of mimetools.Message (or a derived
    class) containing the header information;
    - rfile is a file object open for reading positioned at the
    start of the optional input data part;
    - wfile is a file object open for writing.
    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
    The first thing to be written must be the response line. Then
    follow 0 or more header lines, then a blank line, and then the
    actual data (if any). The meaning of the header lines depends on
    the command executed by the server; in most cases, when data is
    returned, there should be at least one header line of the form
    Content-type: <type>/<subtype>
    where <type> and <subtype> should be registered MIME types,
    e.g. "text/html" or "text/plain".
    """
    # The Python system version, truncated to its first component.
    sys_version = "Python/" + sys.version.split()[0]
    # The server software version. You may want to override this.
    # The format is multiple whitespace-separated strings,
    # where each string is of the form name[/version].
    server_version = "BaseHTTP/" + __version__
    # The default request version. This only affects responses up until
    # the point where the request line is parsed, so it mainly decides what
    # the client gets back when sending a malformed request line.
    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
    default_request_version = "HTTP/0.9"
    def parse_request(self):
        """Parse a request (internal).
        The request should be stored in self.raw_requestline; the results
        are in self.command, self.path, self.request_version and
        self.headers.
        Return True for success, False for failure; on failure, an
        error is sent back.
        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        # Assume the connection closes unless keep-alive is negotiated below.
        self.close_connection = 1
        requestline = self.raw_requestline
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            # Full request line: <command> <path> <version>.
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
                # Both sides speak HTTP/1.1+, so default to persistent
                # connections (may still be overridden by a Connection header).
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                          "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            # Two words: an HTTP/0.9 "simple request"; only GET is legal.
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            # Empty request line: fail without sending an error response.
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version
        # Examine the headers and look for a Connection directive
        self.headers = self.MessageClass(self.rfile, 0)
        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif (conntype.lower() == 'keep-alive' and
              self.protocol_version >= "HTTP/1.1"):
            self.close_connection = 0
        return True
    def handle_one_request(self):
        """Handle a single HTTP request.
        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.
        """
        try:
            # Read at most 64K + 1 bytes: a result longer than 64K means the
            # request line itself is over-long and is rejected with 414.
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                return
            if not self.raw_requestline:
                # EOF: the client closed the connection.
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            # Dispatch to do_<COMMAND>() if the subclass provides it.
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                self.send_error(501, "Unsupported method (%r)" % self.command)
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush() #actually send the response if not already done.
        except socket.timeout:
            #a read or a write timed out.  Discard this connection
            self.log_error("Request timed out: %r", sys.exc_info()[1])
            self.close_connection = 1
            return
    def handle(self):
        """Handle multiple requests if necessary."""
        self.close_connection = 1
        # Always serve at least one request; keep looping while keep-alive
        # leaves close_connection cleared.
        self.handle_one_request()
        while not self.close_connection:
            self.handle_one_request()
    def send_error(self, code, message=None):
        """Send and log an error reply.
        Arguments are the error code, and a detailed message.
        The detailed message defaults to the short entry matching the
        response code.
        This sends an error response (so it must be called before any
        output has been generated), logs the error, and finally sends
        a piece of HTML explaining the error to the user.
        """
        try:
            short, long = self.responses[code]
        except KeyError:
            # Unknown code: fall back to placeholder texts.
            short, long = '???', '???'
        if message is None:
            message = short
        explain = long
        self.log_error("code %d, message %s", code, message)
        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
        content = (self.error_message_format %
                   {'code': code, 'message': _quote_html(message), 'explain': explain})
        self.send_response(code, message)
        self.send_header("Content-Type", self.error_content_type)
        self.send_header('Connection', 'close')
        self.end_headers()
        # No body for HEAD requests, 1xx informational codes (filtered by the
        # >= 200 check), 204 No Content or 304 Not Modified.
        if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
            self.wfile.write(content)
    error_message_format = DEFAULT_ERROR_MESSAGE
    error_content_type = DEFAULT_ERROR_CONTENT_TYPE
    def send_response(self, code, message=None):
        """Send the response header and log the response code.
        Also send two standard headers with the server software
        version and the current date.
        """
        self.log_request(code)
        if message is None:
            if code in self.responses:
                message = self.responses[code][0]
            else:
                message = ''
        if self.request_version != 'HTTP/0.9':
            # HTTP/0.9 responses carry no status line or headers.
            self.wfile.write("%s %d %s\r\n" %
                             (self.protocol_version, code, message))
            # print (self.protocol_version, code, message)
        self.send_header('Server', self.version_string())
        self.send_header('Date', self.date_time_string())
    def send_header(self, keyword, value):
        """Send a MIME header."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s: %s\r\n" % (keyword, value))
        # Mirror Connection header values into close_connection so handle()
        # knows whether to keep serving on this socket.
        if keyword.lower() == 'connection':
            if value.lower() == 'close':
                self.close_connection = 1
            elif value.lower() == 'keep-alive':
                self.close_connection = 0
    def end_headers(self):
        """Send the blank line ending the MIME headers."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("\r\n")
    def log_request(self, code='-', size='-'):
        """Log an accepted request.
        This is called by send_response().
        """
        self.log_message('"%s" %s %s',
                         self.requestline, str(code), str(size))
    def log_error(self, format, *args):
        """Log an error.
        This is called when a request cannot be fulfilled.  By
        default it passes the message on to log_message().
        Arguments are the same as for log_message().
        XXX This should go to the separate error log.
        """
        self.log_message(format, *args)
    def log_message(self, format, *args):
        """Log an arbitrary message.
        This is used by all other logging functions.  Override
        it if you have specific logging wishes.
        The first argument, FORMAT, is a format string for the
        message to be logged.  If the format string contains
        any % escapes requiring parameters, they should be
        specified as subsequent arguments (it's just like
        printf!).
        The client host and current date/time are prefixed to
        every message.
        """
        sys.stderr.write("%s - - [%s] %s\n" %
                         (self.address_string(),
                          self.log_date_time_string(),
                          format%args))
    def version_string(self):
        """Return the server software version string."""
        return self.server_version + ' ' + self.sys_version
    def date_time_string(self, timestamp=None):
        """Return the current date and time formatted for a message header."""
        if timestamp is None:
            timestamp = time.time()
        # RFC 1123 format, always in GMT as HTTP requires.
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)
        return s
    def log_date_time_string(self):
        """Return the current time formatted for logging."""
        now = time.time()
        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
                day, self.monthname[month], year, hh, mm, ss)
        return s
    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    # Index 0 is None so month numbers (1-12) index directly.
    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    def address_string(self):
        """Return the client address formatted for logging.
        This version looks up the full hostname using gethostbyaddr(),
        and tries to find a name that contains at least one dot.
        """
        host, port = self.client_address[:2]
        return socket.getfqdn(host)
    # Essentially static class variables
    # The version of the HTTP protocol we support.
    # Set this to HTTP/1.1 to enable automatic keepalive
    protocol_version = "HTTP/1.0"
    # The Message-like class used to parse headers
    MessageClass = mimetools.Message
    # Table mapping response codes to messages; entries have the
    # form {code: (shortmessage, longmessage)}.
    # See RFC 2616.
    responses = {
        100: ('Continue', 'Request received, please continue'),
        101: ('Switching Protocols',
              'Switching to new protocol; obey Upgrade header'),
        200: ('OK', 'Request fulfilled, document follows'),
        201: ('Created', 'Document created, URL follows'),
        202: ('Accepted',
              'Request accepted, processing continues off-line'),
        203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
        204: ('No Content', 'Request fulfilled, nothing follows'),
        205: ('Reset Content', 'Clear input form for further input.'),
        206: ('Partial Content', 'Partial content follows.'),
        300: ('Multiple Choices',
              'Object has several resources -- see URI list'),
        301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
        302: ('Found', 'Object moved temporarily -- see URI list'),
        303: ('See Other', 'Object moved -- see Method and URL list'),
        304: ('Not Modified',
              'Document has not changed since given time'),
        305: ('Use Proxy',
              'You must use proxy specified in Location to access this '
              'resource.'),
        307: ('Temporary Redirect',
              'Object moved temporarily -- see URI list'),
        400: ('Bad Request',
              'Bad request syntax or unsupported method'),
        401: ('Unauthorized',
              'No permission -- see authorization schemes'),
        402: ('Payment Required',
              'No payment -- see charging schemes'),
        403: ('Forbidden',
              'Request forbidden -- authorization will not help'),
        404: ('Not Found', 'Nothing matches the given URI'),
        405: ('Method Not Allowed',
              'Specified method is invalid for this resource.'),
        406: ('Not Acceptable', 'URI not available in preferred format.'),
        407: ('Proxy Authentication Required', 'You must authenticate with '
              'this proxy before proceeding.'),
        408: ('Request Timeout', 'Request timed out; try again later.'),
        409: ('Conflict', 'Request conflict.'),
        410: ('Gone',
              'URI no longer exists and has been permanently removed.'),
        411: ('Length Required', 'Client must specify Content-Length.'),
        412: ('Precondition Failed', 'Precondition in headers is false.'),
        413: ('Request Entity Too Large', 'Entity is too large.'),
        414: ('Request-URI Too Long', 'URI is too long.'),
        415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
        416: ('Requested Range Not Satisfiable',
              'Cannot satisfy request range.'),
        417: ('Expectation Failed',
              'Expect condition could not be satisfied.'),
        500: ('Internal Server Error', 'Server got itself in trouble'),
        501: ('Not Implemented',
              'Server does not support this operation'),
        502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
        503: ('Service Unavailable',
              'The server cannot process the request due to a high load'),
        504: ('Gateway Timeout',
              'The gateway server did not receive a timely response'),
        505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
        }
def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.
    This runs an HTTP server on port 8000 (or the first command line
    argument).
    """
    # Optional first CLI argument overrides the default port.
    port = int(sys.argv[1]) if sys.argv[1:] else 8000
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(('', port), HandlerClass)
    sa = httpd.socket.getsockname()
    print("Serving HTTP on", sa[0], "port", sa[1], "...")
    httpd.serve_forever()
# Run the demo server when this module is executed directly.
if __name__ == '__main__':
    test()
| apache-2.0 |
amitdeutsch/oppia | core/domain/user_jobs_one_off_test.py | 1 | 24075 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user dashboard computations."""
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_jobs_one_off
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
# Resolve the user storage models; import_models returns one model module per
# requested name, hence the 1-tuple unpack.
(user_models,) = models.Registry.import_models(
    [models.NAMES.user])
# Platform-specific taskqueue and search services, resolved via the registry.
taskqueue_services = models.Registry.import_taskqueue_services()
search_services = models.Registry.import_search_services()
class UserContributionsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off user contributions job."""
    # Exploration ids used by the fixtures below.
    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    # One signup (email, username) pair per test user.
    USER_A_EMAIL = 'a@example.com'
    USER_A_USERNAME = 'a'
    USER_B_EMAIL = 'b@example.com'
    USER_B_USERNAME = 'b'
    USER_C_EMAIL = 'c@example.com'
    USER_C_USERNAME = 'c'
    USER_D_EMAIL = 'd@example.com'
    USER_D_USERNAME = 'd'
    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = user_jobs_one_off.UserContributionsOneOffJob.create_new()
        user_jobs_one_off.UserContributionsOneOffJob.enqueue(job_id)
        # Enqueuing should place exactly one task on the default queue;
        # then drain the queue so the job completes before assertions run.
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                queue_name=taskqueue_services.QUEUE_NAME_DEFAULT),
            1)
        self.process_and_flush_pending_tasks()
    def setUp(self):
        super(UserContributionsOneOffJobTests, self).setUp()
        # User A has no created or edited explorations
        # User B has one created exploration
        # User C has one edited exploration
        # User D has created an exploration and then edited it.
        # (This is used to check that there are no duplicate
        # entries in the contribution lists.)
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
        self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
        self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
        self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
        self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)
        # B creates EXP_ID_1; C then edits it.
        self.save_new_valid_exploration(
            self.EXP_ID_1, self.user_b_id, end_state_name='End')
        exp_services.update_exploration(self.user_c_id, self.EXP_ID_1, [{
            'cmd': 'edit_exploration_property',
            'property_name': 'objective',
            'new_value': 'the objective'
        }], 'Test edit')
        # D both creates and edits EXP_ID_2 (duplicate-entry check).
        self.save_new_valid_exploration(
            self.EXP_ID_2, self.user_d_id, end_state_name='End')
        exp_services.update_exploration(self.user_d_id, self.EXP_ID_2, [{
            'cmd': 'edit_exploration_property',
            'property_name': 'objective',
            'new_value': 'the objective'
        }], 'Test edit')
    def test_null_case(self):
        """Tests the case where user has no created or edited explorations."""
        self._run_one_off_job()
        user_a_contributions_model = user_models.UserContributionsModel.get(
            self.user_a_id, strict=False)
        self.assertEqual(user_a_contributions_model.created_exploration_ids, [])
        self.assertEqual(user_a_contributions_model.edited_exploration_ids, [])
    def test_created_exp(self):
        """Tests the case where user has created (and therefore edited)
        an exploration."""
        self._run_one_off_job()
        user_b_contributions_model = user_models.UserContributionsModel.get(
            self.user_b_id)
        self.assertEqual(
            user_b_contributions_model.created_exploration_ids, [self.EXP_ID_1])
        self.assertEqual(
            user_b_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
    def test_edited_exp(self):
        """Tests the case where user has an edited exploration."""
        self._run_one_off_job()
        user_c_contributions_model = user_models.UserContributionsModel.get(
            self.user_c_id)
        self.assertEqual(
            user_c_contributions_model.created_exploration_ids, [])
        self.assertEqual(
            user_c_contributions_model.edited_exploration_ids, [self.EXP_ID_1])
    def test_for_duplicates(self):
        """Tests the case where user has an edited exploration, and edits
        it again making sure it is not duplicated."""
        self._run_one_off_job()
        user_d_contributions_model = user_models.UserContributionsModel.get(
            self.user_d_id)
        self.assertEqual(
            user_d_contributions_model.edited_exploration_ids,
            [self.EXP_ID_2])
        self.assertEqual(
            user_d_contributions_model.created_exploration_ids,
            [self.EXP_ID_2])
class DashboardSubscriptionsOneOffJobTests(test_utils.GenericTestBase):
"""Tests for the one-off dashboard subscriptions job."""
EXP_ID_1 = 'exp_id_1'
EXP_ID_2 = 'exp_id_2'
COLLECTION_ID_1 = 'col_id_1'
COLLECTION_ID_2 = 'col_id_2'
EXP_ID_FOR_COLLECTION_1 = 'id_of_exp_in_collection_1'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
USER_C_EMAIL = 'c@example.com'
USER_C_USERNAME = 'c'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = user_jobs_one_off.DashboardSubscriptionsOneOffJob.create_new()
user_jobs_one_off.DashboardSubscriptionsOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
queue_name=taskqueue_services.QUEUE_NAME_DEFAULT),
1)
self.process_and_flush_pending_tasks()
def _null_fn(self, *args, **kwargs):
"""A mock for functions of the form subscribe_to_*() to represent
behavior prior to the implementation of subscriptions.
"""
pass
def setUp(self):
super(DashboardSubscriptionsOneOffJobTests, self).setUp()
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves a new valid exploration.
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
def test_null_case(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self._run_one_off_job()
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
def test_feedback_thread_subscription(self):
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id, strict=False)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(user_b_subscriptions_model, None)
self.assertEqual(user_c_subscriptions_model, None)
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User B starts a feedback thread.
feedback_services.create_thread(
self.EXP_ID_1, None, self.user_b_id, 'subject', 'text')
# User C adds to that thread.
thread_id = feedback_services.get_threadlist(
self.EXP_ID_1)[0]['thread_id']
feedback_services.create_message(
thread_id, self.user_c_id, None, None, 'more text')
self._run_one_off_job()
# Both users are subscribed to the feedback thread.
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id)
self.assertEqual(user_b_subscriptions_model.activity_ids, [])
self.assertEqual(user_c_subscriptions_model.activity_ids, [])
self.assertEqual(
user_b_subscriptions_model.feedback_thread_ids, [thread_id])
self.assertEqual(
user_c_subscriptions_model.feedback_thread_ids, [thread_id])
def test_exploration_subscription(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_b_id,
rights_manager.ROLE_EDITOR)
# User A adds user C as a viewer of the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_c_id,
rights_manager.ROLE_VIEWER)
self._run_one_off_job()
# Users A and B are subscribed to the exploration. User C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_a_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_b_subscriptions_model.feedback_thread_ids, [])
self.assertEqual(user_c_subscriptions_model, None)
def test_two_explorations(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A creates and saves another valid exploration.
self.save_new_valid_exploration(self.EXP_ID_2, self.user_a_id)
self._run_one_off_job()
# User A is subscribed to two explorations.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
self.assertEqual(
sorted(user_a_subscriptions_model.activity_ids),
sorted([self.EXP_ID_1, self.EXP_ID_2]))
def test_community_owned_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A adds user B as an editor to the exploration.
rights_manager.assign_role_for_exploration(
self.user_a_id, self.EXP_ID_1, self.user_b_id,
rights_manager.ROLE_EDITOR)
# The exploration becomes community-owned.
rights_manager.publish_exploration(self.user_a_id, self.EXP_ID_1)
rights_manager.release_ownership_of_exploration(
self.user_a_id, self.EXP_ID_1)
# User C edits the exploration.
exp_services.update_exploration(
self.user_c_id, self.EXP_ID_1, [], 'Update exploration')
self._run_one_off_job()
# User A and user B are subscribed to the exploration; user C is not.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id)
user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_b_id)
user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_c_id, strict=False)
self.assertEqual(
user_a_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(
user_b_subscriptions_model.activity_ids, [self.EXP_ID_1])
self.assertEqual(user_c_subscriptions_model, None)
def test_deleted_exploration(self):
with self.swap(
subscription_services, 'subscribe_to_thread', self._null_fn
), self.swap(
subscription_services, 'subscribe_to_exploration', self._null_fn
):
# User A deletes the exploration.
exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
self._run_one_off_job()
# User A is not subscribed to the exploration.
user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
self.user_a_id, strict=False)
self.assertEqual(user_a_subscriptions_model, None)
def test_collection_subscription(self):
    """Collection owners and editors get subscribed; viewers do not."""
    with self.swap(
        subscription_services, 'subscribe_to_thread', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_exploration', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_collection', self._null_fn
    ):
        # User A creates a collection wrapping a fresh exploration.
        self.save_new_valid_collection(
            self.COLLECTION_ID_1, self.user_a_id,
            exploration_id=self.EXP_ID_FOR_COLLECTION_1)
        # User B becomes an editor; user C only a viewer.
        rights_manager.assign_role_for_collection(
            self.user_a_id, self.COLLECTION_ID_1, self.user_b_id,
            rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_collection(
            self.user_a_id, self.COLLECTION_ID_1, self.user_c_id,
            rights_manager.ROLE_VIEWER)
    self._run_one_off_job()
    model_a = user_models.UserSubscriptionsModel.get(self.user_a_id)
    model_b = user_models.UserSubscriptionsModel.get(self.user_b_id)
    model_c = user_models.UserSubscriptionsModel.get(
        self.user_c_id, strict=False)
    # Users A and B are subscribed to the collection; user C is not.
    self.assertEqual(model_a.collection_ids, [self.COLLECTION_ID_1])
    # User A created both explorations, so both activity ids appear.
    self.assertEqual(
        sorted(model_a.activity_ids), [
            self.EXP_ID_1, self.EXP_ID_FOR_COLLECTION_1])
    self.assertEqual(model_b.collection_ids, [self.COLLECTION_ID_1])
    self.assertEqual(model_a.feedback_thread_ids, [])
    self.assertEqual(model_b.feedback_thread_ids, [])
    self.assertEqual(model_c, None)
def test_two_collections(self):
    """Creating two collections subscribes the creator to both."""
    with self.swap(
        subscription_services, 'subscribe_to_thread', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_exploration', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_collection', self._null_fn
    ):
        # User A creates two valid collections around the same exploration.
        for collection_id in (self.COLLECTION_ID_1, self.COLLECTION_ID_2):
            self.save_new_valid_collection(
                collection_id, self.user_a_id,
                exploration_id=self.EXP_ID_FOR_COLLECTION_1)
    self._run_one_off_job()
    # User A is subscribed to both collections.
    subscriptions = user_models.UserSubscriptionsModel.get(self.user_a_id)
    self.assertEqual(
        sorted(subscriptions.collection_ids),
        sorted([self.COLLECTION_ID_1, self.COLLECTION_ID_2]))
def test_deleted_collection(self):
    """Deleting the collection and exploration leaves no subscriptions."""
    with self.swap(
        subscription_services, 'subscribe_to_thread', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_exploration', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_collection', self._null_fn
    ):
        # User A creates a collection and then deletes it again.
        self.save_new_default_collection(
            self.COLLECTION_ID_1, self.user_a_id)
        collection_services.delete_collection(
            self.user_a_id, self.COLLECTION_ID_1)
        # User A also deletes the exploration from earlier.
        exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
    self._run_one_off_job()
    # Nothing remains, so no subscriptions model exists for user A.
    subscriptions = user_models.UserSubscriptionsModel.get(
        self.user_a_id, strict=False)
    self.assertIsNone(subscriptions)
def test_adding_exploration_to_collection(self):
    """Adding A's exploration to B's collection subscribes each user only
    to their own creation.
    """
    with self.swap(
        subscription_services, 'subscribe_to_thread', self._null_fn
    ), self.swap(
        subscription_services, 'subscribe_to_collection', self._null_fn
    ):
        # User B creates a collection and pulls in user A's exploration.
        self.save_new_default_collection(
            self.COLLECTION_ID_1, self.user_b_id)
        collection_services.update_collection(
            self.user_b_id, self.COLLECTION_ID_1, [{
                'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
                'exploration_id': self.EXP_ID_1
            }], 'Add new exploration to collection.')
    # Before the job runs, neither user has any subscriptions model.
    self.assertIsNone(user_models.UserSubscriptionsModel.get(
        self.user_a_id, strict=False))
    self.assertIsNone(user_models.UserSubscriptionsModel.get(
        self.user_b_id, strict=False))
    self._run_one_off_job()
    model_a = user_models.UserSubscriptionsModel.get(self.user_a_id)
    model_b = user_models.UserSubscriptionsModel.get(self.user_b_id)
    # User A owns the exploration; user B owns the collection.
    self.assertEqual(model_a.activity_ids, [self.EXP_ID_1])
    self.assertEqual(model_a.collection_ids, [])
    self.assertEqual(model_b.activity_ids, [])
    self.assertEqual(model_b.collection_ids, [self.COLLECTION_ID_1])
class UserFirstContributionMsecOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that backfills first_contribution_msec."""

    EXP_ID = 'test_exp'

    def setUp(self):
        super(UserFirstContributionMsecOneOffJobTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_EMAIL])
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

    def _run_job(self):
        # Create, enqueue and synchronously flush the one-off job.
        job_id = (
            user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
        user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_tasks()

    def test_contribution_msec_updates_on_published_explorations(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.admin_id, end_state_name='End')
        init_state_name = exploration.init_state_name

        # Before publication, no contribution time is recorded.
        self._run_job()
        self.assertIsNone(
            user_services.get_user_settings(
                self.admin_id).first_contribution_msec)

        # After publication, every owner and editor of the exploration
        # gets a first-contribution timestamp.
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin_id, self.EXP_ID)
        rights_manager.release_ownership_of_exploration(
            self.admin_id, self.EXP_ID)
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [{
                'cmd': 'edit_state_property',
                'state_name': init_state_name,
                'property_name': 'widget_id',
                'new_value': 'MultipleChoiceInput'
            }], 'commit')
        self._run_job()
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)

    def test_contribution_msec_does_not_update_on_unpublished_explorations(self):
        self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        exp_services.publish_exploration_and_update_user_profiles(
            self.owner_id, self.EXP_ID)
        # Manually reset first_contribution_msec so we can verify that the
        # job skips the unpublished exploration and does not restore it.
        user_services._update_first_contribution_msec(  # pylint: disable=protected-access
            self.owner_id, None)
        rights_manager.unpublish_exploration(self.admin_id, self.EXP_ID)

        self._run_job()
        self.assertIsNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
| apache-2.0 |
jnishi/chainer | tests/chainer_tests/functions_tests/array_tests/test_copy.py | 4 | 3409 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _to_gpu(x, device_id):
if device_id >= 0:
return cuda.to_gpu(x, device_id)
else:
return x
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestCopy(unittest.TestCase):
    """Forward, backward and double-backward tests for ``functions.copy``
    across CPU and GPU devices (device id ``-1`` means CPU).
    """

    def setUp(self):
        shape = (10, 5)
        self.x_data = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        # float16 needs looser tolerances in the double-backward check.
        self.check_double_backward_options = {}
        if self.dtype == numpy.float16:
            self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}

    def check_forward(self, src_id, dst_id):
        """Copy from device ``src_id`` to ``dst_id``; contents and dtype
        must be preserved.
        """
        x = chainer.Variable(_to_gpu(self.x_data, src_id))
        y = functions.copy(x, dst_id)
        self.assertEqual(self.x_data.dtype, self.dtype)
        numpy.testing.assert_array_equal(self.x_data, cuda.to_cpu(y.data))

    def check_backward(self, src_id, dst_id):
        """Backprop through a copy; the gradient must land on the source
        device unchanged.
        """
        x = chainer.Variable(_to_gpu(self.x_data, src_id))
        y = functions.copy(x, dst_id)
        y.grad = _to_gpu(self.gy, dst_id)
        y.backward()
        x_grad = x.grad
        self.assertEqual(cuda.get_device_from_array(x_grad).id, src_id)
        numpy.testing.assert_array_equal(cuda.to_cpu(x_grad), self.gy)

    def test_forward_cpu(self):
        self.check_forward(-1, -1)

    def test_backward_cpu(self):
        self.check_backward(-1, -1)

    @attr.gpu
    def test_forward_gpu(self):
        device_id = cuda.Device().id
        self.check_forward(device_id, device_id)

    @attr.gpu
    def test_check_backward_gpu(self):
        # Bug fix: this previously called check_forward, leaving the
        # GPU-to-GPU backward path completely untested.
        device_id = cuda.Device().id
        self.check_backward(device_id, device_id)

    @attr.gpu
    def test_forward_cpu_to_gpu(self):
        device_id = cuda.Device().id
        self.check_forward(-1, device_id)

    @attr.gpu
    def test_backward_cpu_to_gpu(self):
        device_id = cuda.Device().id
        self.check_backward(-1, device_id)

    @attr.gpu
    def test_forward_gpu_to_cpu(self):
        device_id = cuda.Device().id
        self.check_forward(device_id, -1)

    @attr.gpu
    def test_backward_gpu_to_cpu(self):
        device_id = cuda.Device().id
        self.check_backward(device_id, -1)

    @attr.multi_gpu(2)
    def test_forward_multigpu(self):
        self.check_forward(0, 1)

    @attr.multi_gpu(2)
    def test_backward_multigpu(self):
        self.check_backward(0, 1)

    def check_double_backward(self, x_data, y_grad, x_grad_grad):
        def f(x):
            return functions.copy(x, -1)

        gradient_check.check_double_backward(
            f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
            **self.check_double_backward_options)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x_data, self.gy, self.ggx)
class TestCopyArgument(unittest.TestCase):
    # Checks that functions.copy also accepts a raw ndarray (not only a
    # chainer.Variable) and a device object (not only an integer id).

    def setUp(self):
        self.x_data = numpy.zeros((2, 3))

    def test_call_forward_with_device(self):
        # Passing cuda.DummyDevice (a device object) must not raise.
        functions.copy(self.x_data, cuda.DummyDevice)
# Collect and run every test case defined in this module.
testing.run_module(__name__, __file__)
| mit |
groovecoder/kuma | vendor/packages/pygments/lexers/factor.py | 72 | 17864 | # -*- coding: utf-8 -*-
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
    """
    Lexer for the `Factor <http://factorcode.org>`_ language.

    .. versionadded:: 1.4
    """
    name = 'Factor'
    aliases = ['factor']
    filenames = ['*.factor']
    mimetypes = ['text/x-factor']
    flags = re.MULTILINE | re.UNICODE

    # Each builtin_* tuple below lists the words of one core Factor
    # vocabulary; `words(...)` compiles them into a single alternation.
    # The r'\s' suffix is required because Factor words are whitespace
    # delimited and may contain almost any punctuation.

    # Stack shufflers, combinators and object-system primitives.
    builtin_kernel = words((
        '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
        '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
        '3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
        '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
        'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
        'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
        'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
        'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
        'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
        'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
        'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
        'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
        'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
        'wrapper', 'wrapper?', 'xor'), suffix=r'\s')

    # Associative-mapping (assoc) vocabulary.
    builtin_assocs = words((
        '2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
        'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
        'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
        'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
        'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
        'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
        'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
        'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
        'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
        'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
        'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
        'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s')

    # Dataflow combinators (cleave/spread/case families).
    builtin_combinators = words((
        '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
        '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
        'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
        'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
        'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
        'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s')

    # Arithmetic and numeric-tower words.
    builtin_math = words((
        '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
        '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
        '(each-integer)', '(find-integer)', '*', '+', '?1+',
        'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
        'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
        'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
        'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
        'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
        'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
        'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
        'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
        'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
        'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
        'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
        'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
        'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
        'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
        'zero?'), suffix=r'\s')

    # Sequence-protocol words (largest vocabulary).
    builtin_sequences = words((
        '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
        '2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
        '3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
        '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
        'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
        'assert-sequence', 'assert-sequence=', 'assert-sequence?',
        'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
        'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
        'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
        'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
        'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
        'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
        'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
        'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
        'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
        'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
        'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
        'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
        'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
        'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
        'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
        'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
        'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
        'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
        'non-negative-integer-expected', 'non-negative-integer-expected?',
        'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
        'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
        'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
        'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
        'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
        'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
        'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
        'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
        'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
        'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
        'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
        'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
        'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
        'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
        'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
        'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
        'when-empty'), suffix=r'\s')

    # Dynamic-variable / namespace words.
    builtin_namespaces = words((
        '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
        'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
        'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
        'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
        suffix=r'\s')

    # Fixed-size array words.
    builtin_arrays = words((
        '1array', '2array', '3array', '4array', '<array>', '>array', 'array',
        'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s')

    # Stream input/output words.
    builtin_io = words((
        '(each-stream-block-slice)', '(each-stream-block)',
        '(stream-contents-by-block)', '(stream-contents-by-element)',
        '(stream-contents-by-length-or-block)',
        '(stream-contents-by-length)', '+byte+', '+character+',
        'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
        'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
        'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
        'error-stream', 'flush', 'input-stream', 'input-stream?',
        'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
        'output-stream', 'output-stream?', 'print', 'read', 'read-into',
        'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
        'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
        'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
        'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
        'stream-copy*', 'stream-element-type', 'stream-flush',
        'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
        'stream-read', 'stream-read-into', 'stream-read-partial',
        'stream-read-partial-into', 'stream-read-partial-unsafe',
        'stream-read-unsafe', 'stream-read-until', 'stream-read1',
        'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
        'stream-write', 'stream-write1', 'tell-input', 'tell-output',
        'with-error-stream', 'with-error-stream*', 'with-error>output',
        'with-input-output+error-streams',
        'with-input-output+error-streams*', 'with-input-stream',
        'with-input-stream*', 'with-output-stream', 'with-output-stream*',
        'with-output>error', 'with-output+error-stream',
        'with-output+error-stream*', 'with-streams', 'with-streams*',
        'write', 'write1'), suffix=r'\s')

    # String words.
    builtin_strings = words((
        '1string', '<string>', '>string', 'resize-string', 'string',
        'string?'), suffix=r'\s')

    # Growable-vector words.
    builtin_vectors = words((
        '1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
        suffix=r'\s')

    # Continuation / error-handling words.
    builtin_continuations = words((
        '<condition>', '<continuation>', '<restart>', 'attempt-all',
        'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
        'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
        'condition?', 'continuation', 'continuation?', 'continue',
        'continue-restart', 'continue-with', 'current-continuation',
        'error', 'error-continuation', 'error-in-thread', 'error-thread',
        'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
        'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
        'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
        'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s')

    # RegexLexer state machine: 'root' delegates to 'base'; sub-states
    # handle stack-effect declarations, tuple slots, vocab lists, etc.
    tokens = {
        'root': [
            # factor allows a file to start with a shebang
            (r'#!.*$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            (r'\s+', Text),

            # defining words
            (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function)),
            (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class, Text, Name.Function)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
            (r'(GENERIC:)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function)),
            (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function, Text, Name.Function)),
            (r'\(\s', Name.Function, 'stackeffect'),
            (r';\s', Keyword),

            # imports and namespaces
            (r'(USING:)(\s+)',
             bygroups(Keyword.Namespace, Text), 'vocabs'),
            (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Text, Name.Namespace)),
            (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)),
            (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'),
            (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)',
             bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)),
            (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)),
            (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Text, Name.Function)),

            # tuples and classes
            (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
            (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class), 'slots'),
            (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
            (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
            (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
            (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
            (r'SINGLETONS:', Keyword, 'classes'),

            # other syntax
            (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Function)),
            (r'SYMBOLS:\s', Keyword, 'words'),
            (r'SYNTAX:\s', Keyword),
            (r'ALIEN:\s', Keyword),
            (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
            (r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
             bygroups(Keyword.Namespace, Text, Name.Function, Text)),
            (r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
             bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)),

            # vocab.private
            (r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),

            # strings
            (r'"""\s+(?:.|\n)*?\s+"""', String),
            (r'"(?:\\\\|\\"|[^"])*"', String),
            (r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
            (r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),

            # comments
            (r'!\s+.*$', Comment),
            (r'#!\s+.*$', Comment),
            (r'/\*\s+(?:.|\n)*?\s\*/\s', Comment),

            # boolean constants
            (r'[tf]\s', Name.Constant),

            # symbols and literals
            (r'[\\$]\s+\S+', Name.Constant),
            (r'M\\\s+\S+\s+\S+', Name.Constant),

            # numbers
            (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
            (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
            (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'0b[01]+\s', Number.Bin),
            (r'0o[0-7]+\s', Number.Oct),
            (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
            (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),

            # keywords
            (r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
             Keyword),

            # builtins
            (builtin_kernel, Name.Builtin),
            (builtin_assocs, Name.Builtin),
            (builtin_combinators, Name.Builtin),
            (builtin_math, Name.Builtin),
            (builtin_sequences, Name.Builtin),
            (builtin_namespaces, Name.Builtin),
            (builtin_arrays, Name.Builtin),
            (builtin_io, Name.Builtin),
            (builtin_strings, Name.Builtin),
            (builtin_vectors, Name.Builtin),
            (builtin_continuations, Name.Builtin),

            # everything else is text
            (r'\S+', Text),
        ],
        'stackeffect': [
            (r'\s+', Text),
            # nested stack effects are allowed, so recurse
            (r'\(\s+', Name.Function, 'stackeffect'),
            (r'\)\s', Name.Function, '#pop'),
            (r'--\s', Name.Function),
            (r'\S+', Name.Variable),
        ],
        'slots': [
            (r'\s+', Text),
            (r';\s', Keyword, '#pop'),
            (r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
             bygroups(Text, Name.Variable, Text)),
            (r'\S+', Name.Variable),
        ],
        'vocabs': [
            (r'\s+', Text),
            (r';\s', Keyword, '#pop'),
            (r'\S+', Name.Namespace),
        ],
        'classes': [
            (r'\s+', Text),
            (r';\s', Keyword, '#pop'),
            (r'\S+', Name.Class),
        ],
        'words': [
            (r'\s+', Text),
            (r';\s', Keyword, '#pop'),
            (r'\S+', Name.Function),
        ],
    }
| mpl-2.0 |
dreispt/project-service | project_issue_reassign/__openerp__.py | 7 | 1411 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Daniel Reis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: metadata, dependencies and data files for the
# "Reassign Project Issues" module.
{
    'name': 'Reassign Project Issues',
    'summary': 'Reassign Issues to other Responsibles and Projects',
    'version': '8.0.1.1.0',
    "category": "Project Management",
    'author': 'Daniel Reis, Odoo Community Association (OCA)',
    'license': 'AGPL-3',
    'website': 'https://github.com/OCA/project-service',
    # Modules that must be installed before this one.
    'depends': [
        'project_issue',
    ],
    # View/wizard XML files loaded at installation time.
    'data': [
        'wizard/project_issue_reassign_view.xml',
        'project_issue_view.xml',
    ],
    'installable': True,
}
| agpl-3.0 |
Jmainguy/ansible-modules-core | utilities/logic/pause.py | 35 | 2321 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
version_added: "0.8"
options:
minutes:
description:
- A positive number of minutes to pause for.
required: false
default: null
seconds:
description:
- A positive number of seconds to pause for.
required: false
default: null
prompt:
description:
- Optional text to use for the prompt message.
required: false
default: null
author: "Tim Bielawa (@tbielawa)"
notes:
- Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely.
'''
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
- pause: minutes=5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
- pause: prompt="Make sure org.foo.FooOverload exception is not present"
'''
| gpl-3.0 |
Edraak/edraak-platform | openedx/core/djangoapps/content/course_structures/management/commands/generate_course_structure.py | 13 | 2300 | """
Django Management Command: Generate Course Structure
Generates and stores course structure information for one or more courses.
"""
import logging
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from six import text_type
from openedx.core.djangoapps.content.course_structures.tasks import update_course_structure
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that generates and stores course structure
    information for one or more courses.
    """

    help = 'Generates and stores course structure for one or more courses.'

    def add_arguments(self, parser):
        """Register positional course ids plus the --all flag."""
        parser.add_argument('course_id', nargs='*')
        parser.add_argument(
            '--all',
            action='store_true',
            help='Generate structures for all courses.')

    def handle(self, *args, **options):
        """Run structure generation for the selected courses."""
        if options['all']:
            keys = [course.id for course in modulestore().get_courses()]
        else:
            keys = [CourseKey.from_string(arg) for arg in options['course_id']]

        if not keys:
            log.fatal('No courses specified.')
            return

        log.info('Generating course structures for %d courses.', len(keys))
        log.debug('Generating course structure(s) for the following courses: %s', keys)

        for key in keys:
            try:
                # Run the update task synchronously so that we know when all
                # course structures have been updated before returning.
                update_course_structure.apply(args=[text_type(key)])
            except Exception as ex:  # pylint: disable=broad-except
                # Keep going: one broken course must not stop the rest.
                log.exception('An error occurred while generating course structure for %s: %s',
                              text_type(key), text_type(ex))

        log.info('Finished generating course structures.')
| agpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/pip/_vendor/progress/bar.py | 404 | 2707 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from . import Progress
from .helpers import WritelnMixin
class Bar(WritelnMixin, Progress):
    """Classic text progress bar: ``message |####    | index/max``."""
    width = 32
    message = ''
    suffix = '%(index)d/%(max)d'
    bar_prefix = ' |'
    bar_suffix = '| '
    empty_fill = ' '
    fill = '#'
    hide_cursor = True

    def update(self):
        """Redraw the bar line to reflect the current progress."""
        # Number of completely filled cells at the current progress.
        filled = int(self.width * self.progress)
        pieces = [
            self.message % self,
            self.bar_prefix,
            self.fill * filled,
            self.empty_fill * (self.width - filled),
            self.bar_suffix,
            self.suffix % self,
        ]
        self.writeln(''.join(pieces))
class ChargingBar(Bar):
    """Bar variant drawn with Unicode block glyphs and a percentage suffix."""
    suffix = '%(percent)d%%'
    bar_prefix = ' '
    bar_suffix = ' '
    empty_fill = '∙'
    fill = '█'
class FillingSquaresBar(ChargingBar):
    """ChargingBar variant using hollow/filled square glyphs."""
    empty_fill = '▢'
    fill = '▣'
class FillingCirclesBar(ChargingBar):
    """ChargingBar variant using hollow/filled circle glyphs."""
    empty_fill = '◯'
    fill = '◉'
class IncrementalBar(Bar):
    """Bar whose leading cell is drawn with sub-cell resolution.

    Each cell advances through the glyphs in ``phases`` (eighth-block
    characters) before the next cell starts, giving a smoother animation
    than the plain :class:`Bar`.
    """
    phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')

    def update(self):
        """Redraw the bar, including the partially-filled phase glyph."""
        nphases = len(self.phases)
        filled = int(self.width * self.progress)
        # How far the partially-filled cell has advanced, in phase steps.
        phase = int(nphases * self.width * self.progress) - filled * nphases
        current = self.phases[phase] if phase > 0 else ''
        empty_cells = max(0, self.width - filled - len(current))
        pieces = [
            self.message % self,
            self.bar_prefix,
            self.phases[-1] * filled,
            current,
            self.empty_fill * empty_cells,
            self.bar_suffix,
            self.suffix % self,
        ]
        self.writeln(''.join(pieces))
class ShadyBar(IncrementalBar):
    """IncrementalBar variant using shade-density glyphs as phases."""
    phases = (' ', '░', '▒', '▓', '█')
| apache-2.0 |
gautamMalu/linux-samsung-arndale-xen | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level window drawing scheduler traces as scrollable rectangles.

	Horizontal axis is time (internally microseconds, converted to pixels
	via the current zoom); each row of rectangles is one "rect" slot whose
	content is painted by the sched_tracer callback object.
	"""
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5

	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)

		# Leave a small margin around the usable screen area.
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()

		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

		self.scroll.Fit()
		self.Fit()

		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

		# Summary StaticText widget, created lazily by update_summary().
		self.txt = None

		self.Show(True)

	def us_to_px(self, val):
		# Convert microseconds to pixels at the current zoom level.
		return val / (10 ** 3) * self.zoom

	def px_to_us(self, val):
		# Inverse of us_to_px().
		return (val / self.zoom) * (10 ** 3)

	def scroll_start(self):
		# Current scroll position in pixels.
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)

	def scroll_start_us(self):
		# Current horizontal scroll position expressed as a timestamp offset.
		(x, y) = self.scroll_start()
		return self.px_to_us(x)

	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		"""Paint one rectangle in row |nr| spanning [start, end] (timestamps).

		If top_color is given, a thin marker strip is drawn along the
		rectangle's top edge first.
		"""
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)

		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT

		dc = self.dc

		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			# Shrink the main rectangle so it sits below the marker strip.
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH

		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

	def update_rectangles(self, dc, start, end):
		# Delegate actual drawing of the visible zone to the tracer.
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)

	def on_paint(self, event):
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc

		# Repaint only the currently visible time window.
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)

	def rect_from_ypixel(self, y):
		"""Map a y pixel coordinate to a rectangle row index, or -1 if the
		coordinate falls outside every row (e.g. in the spacing)."""
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1

		return rect

	def update_summary(self, txt):
		# Replace the previous summary text widget, if any.
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

	def on_mouse_down(self, event):
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return

		t = self.px_to_us(x) + self.ts_start

		self.sched_tracer.mouse_down(rect, t)

	def update_width_virtual(self):
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

	def __zoom(self, x):
		# Recompute the virtual width for the new zoom and keep the
		# timestamp |x| at the left edge of the view.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()

	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)

	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)

	def on_key_press(self, event):
		# '+'/'-' zoom; arrow keys scroll by one scroll unit.
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return

		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
mafagafogigante/scripts | docdist.py | 1 | 1991 | #!/usr/bin/env python3
import argparse
import collections
import string
def make_punctuation_translation_table():
    """Build a ``str.translate`` table mapping every punctuation character to a space.

    Replacing punctuation with spaces (rather than deleting it) is safer as it
    prevents merging incorrectly separated words together.
    """
    spaces = ' ' * len(string.punctuation)
    return str.maketrans(string.punctuation, spaces)
def count_words(text):
    """Return a mapping from each word of *text* to its number of occurrences.

    The text is lower-cased and punctuation is replaced by spaces before
    splitting.  A ``defaultdict`` is returned on purpose: callers look up
    words that may be absent (e.g. when computing the cosine-similarity
    numerator) and rely on missing words counting as zero.
    """
    text = text.lower().translate(make_punctuation_translation_table())
    # `int` is the idiomatic zero-producing factory (was `lambda: 0`).
    counts = collections.defaultdict(int)
    for word in text.split():
        counts[word] += 1
    return counts
def norm(vector):
    """Return the Euclidean (L2) norm of an iterable of numbers."""
    total = 0
    for component in vector:
        total += component ** 2
    return total ** .5
def write_dictionary(dictionary):
    """Print the dictionary's entries, one per line, most frequent first."""
    by_count = sorted(dictionary.items(), key=lambda entry: entry[1], reverse=True)
    output = '\n'.join(" '{}': {}".format(key, value) for key, value in by_count)
    print(output)
if __name__ == '__main__':
    # Command-line entry point: estimate the similarity of two documents as
    # the cosine of the angle between their word-count vectors.
    parser = argparse.ArgumentParser(description="Uses cosine similarity to estimate document distance.")
    parser.add_argument("a", help="a text file")
    parser.add_argument("b", help="a text file")
    parser.add_argument("-v", "--verbose", action="store_true", help="display word counts")
    arguments = parser.parse_args()
    with open(arguments.a, 'r') as a_file:
        with open(arguments.b, 'r') as b_file:
            a_words = count_words(' '.join(a_file.readlines()))
            b_words = count_words(' '.join(b_file.readlines()))
            if arguments.verbose:
                print("Word count of", arguments.a)
                write_dictionary(a_words)
                print("Word count of", arguments.b)
                write_dictionary(b_words)
            # Dot product of the two count vectors; words missing from b
            # contribute zero because count_words returns a defaultdict.
            numerator = 0
            for word in a_words.keys():
                numerator += a_words[word] * b_words[word]
            result = 0
            if numerator != 0:
                # cos(theta) = (a . b) / (|a| * |b|)
                denominator = norm(a_words.values()) * norm(b_words.values())
                result = numerator / denominator
            print("cos(θ)", "=", result)
| bsd-2-clause |
android-ia/platform_external_chromium_org | tools/deep_memory_profiler/lib/sorter.py | 100 | 14314 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import re
from lib.ordered_dict import OrderedDict
LOGGER = logging.getLogger('dmprof')
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEFAULT_SORTERS = [
os.path.join(BASE_PATH, 'sorters', 'malloc.browser-module.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.renderer-module.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.type.json'),
os.path.join(BASE_PATH, 'sorters', 'malloc.WebCore.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.Android-specific.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.base.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.GPU.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.sharing.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.Skia.json'),
os.path.join(BASE_PATH, 'sorters', 'vm.V8.json'),
]
DEFAULT_TEMPLATES = os.path.join(BASE_PATH, 'templates.json')
class Unit(object):
  """Represents a minimum unit of memory usage categorization.

  It is supposed to be inherited for some different spaces like the entire
  virtual memory and malloc arena.  Such different spaces are called "worlds"
  in dmprof.  (For example, the "vm" world and the "malloc" world.)
  """
  def __init__(self, unit_id, size):
    # |unit_id| identifies this unit within its world; |size| is the unit's
    # memory usage (presumably bytes -- confirm against callers).
    self._unit_id = unit_id
    self._size = size

  @property
  def unit_id(self):
    return self._unit_id

  @property
  def size(self):
    return self._size
class VMUnit(Unit):
  """Represents a Unit for a memory region on virtual memory."""
  def __init__(self, unit_id, committed, reserved, mmap, region,
               pageframe=None, group_pfn_counts=None):
    # The committed size doubles as the unit's |size| in the base class.
    super(VMUnit, self).__init__(unit_id, committed)
    self._reserved = reserved
    self._mmap = mmap          # True iff the region was mmap'ed (hooked).
    self._region = region
    self._pageframe = pageframe
    self._group_pfn_counts = group_pfn_counts

  @property
  def committed(self):
    # Alias of Unit.size: the committed bytes are what the base class stores.
    return self._size

  @property
  def reserved(self):
    return self._reserved

  @property
  def mmap(self):
    return self._mmap

  @property
  def region(self):
    return self._region

  @property
  def pageframe(self):
    return self._pageframe

  @property
  def group_pfn_counts(self):
    return self._group_pfn_counts
class MMapUnit(VMUnit):
  """Represents a Unit for a mmap'ed region."""
  def __init__(self, unit_id, committed, reserved, region, bucket_set,
               pageframe=None, group_pfn_counts=None):
    # mmap=True is fixed for this subclass.
    super(MMapUnit, self).__init__(unit_id, committed, reserved, True,
                                   region, pageframe, group_pfn_counts)
    # |bucket_set| resolves bucket ids referenced by the region.
    self._bucket_set = bucket_set

  def __repr__(self):
    return str(self.region)

  @property
  def bucket_set(self):
    return self._bucket_set
class UnhookedUnit(VMUnit):
  """Represents a Unit for a non-mmap'ed memory region on virtual memory."""
  def __init__(self, unit_id, committed, reserved, region,
               pageframe=None, group_pfn_counts=None):
    # mmap=False is fixed for this subclass.
    super(UnhookedUnit, self).__init__(unit_id, committed, reserved, False,
                                       region, pageframe, group_pfn_counts)

  def __repr__(self):
    return str(self.region)
class MallocUnit(Unit):
  """Represents a Unit for a malloc'ed memory block."""
  def __init__(self, unit_id, size, alloc_count, free_count, bucket):
    super(MallocUnit, self).__init__(unit_id, size)
    # |bucket| carries the symbolized backtrace/type info for this block.
    self._bucket = bucket
    self._alloc_count = alloc_count
    self._free_count = free_count

  def __repr__(self):
    return str(self.bucket)

  @property
  def bucket(self):
    return self._bucket

  @property
  def alloc_count(self):
    return self._alloc_count

  @property
  def free_count(self):
    return self._free_count
class UnitSet(object):
  """Represents an iterable set of Units."""
  def __init__(self, world):
    self._units = {}  # maps a unit_id to its Unit
    self._world = world

  def __repr__(self):
    return str(self._units)

  def __iter__(self):
    # Iterate in ascending unit_id order for deterministic output.
    for unit_id in sorted(self._units):
      yield self._units[unit_id]

  def append(self, unit, overwrite=False):
    # NOTE(review): when |overwrite| is False and the id already exists, this
    # only logs an error -- the existing unit is still replaced below.
    # Presumably intentional (warn-and-overwrite); confirm before changing.
    if not overwrite and unit.unit_id in self._units:
      LOGGER.error('The unit id=%s already exists.' % str(unit.unit_id))
    self._units[unit.unit_id] = unit
class AbstractRule(object):
  """An abstract class for rules to be matched with units.

  A rule is built from a dictionary with a mandatory 'name' and the optional
  keys 'hidden' (default False) and 'subs' (default empty list).
  """
  def __init__(self, dct):
    self._name = dct['name']
    self._hidden = dct.get('hidden', False)
    self._subs = dct.get('subs', [])

  def match(self, unit):
    """Subclasses decide whether |unit| belongs to this rule."""
    raise NotImplementedError()

  @property
  def name(self):
    return self._name

  @property
  def hidden(self):
    return self._hidden

  def iter_subs(self):
    """Iterate over this rule's sub-entries."""
    return iter(self._subs)
class VMRule(AbstractRule):
  """Represents a Rule to match with virtual memory regions.

  Matching criteria (all optional, all must pass):
    backtrace_function / backtrace_sourcefile: regexes on the symbolized
      allocation backtrace (mmap'ed regions only).
    mmap: tri-state -- True matches only mmap'ed regions, False only
      unhooked regions, None (absent) matches both.
    mapped_pathname / mapped_permission: regexes on the VMA name and its
      'rwxp'-style permission string.
    sharedwith: reserved for shared-memory support (see TODOs below).
  """
  def __init__(self, dct):
    super(VMRule, self).__init__(dct)
    self._backtrace_function = dct.get('backtrace_function', None)
    if self._backtrace_function:
      self._backtrace_function = re.compile(self._backtrace_function)
    self._backtrace_sourcefile = dct.get('backtrace_sourcefile', None)
    if self._backtrace_sourcefile:
      self._backtrace_sourcefile = re.compile(self._backtrace_sourcefile)
    self._mmap = dct.get('mmap', None)
    self._sharedwith = dct.get('sharedwith', [])
    self._mapped_pathname = dct.get('mapped_pathname', None)
    if self._mapped_pathname:
      self._mapped_pathname = re.compile(self._mapped_pathname)
    self._mapped_permission = dct.get('mapped_permission', None)
    if self._mapped_permission:
      self._mapped_permission = re.compile(self._mapped_permission)

  def __repr__(self):
    result = cStringIO.StringIO()
    result.write('%s: ' % self._name)
    attributes = []
    attributes.append('mmap: %s' % self._mmap)
    if self._backtrace_function:
      attributes.append('backtrace_function: "%s"' %
                        self._backtrace_function.pattern)
    if self._sharedwith:
      attributes.append('sharedwith: "%s"' % self._sharedwith)
    if self._mapped_pathname:
      attributes.append('mapped_pathname: "%s"' % self._mapped_pathname.pattern)
    if self._mapped_permission:
      attributes.append('mapped_permission: "%s"' %
                        self._mapped_permission.pattern)
    result.write('{ %s }' % ', '.join(attributes))
    return result.getvalue()

  def match(self, unit):
    if unit.mmap:
      assert unit.region[0] == 'hooked'
      bucket = unit.bucket_set.get(unit.region[1]['bucket_id'])
      assert bucket
      assert bucket.allocator_type == 'mmap'

      stackfunction = bucket.symbolized_joined_stackfunction
      stacksourcefile = bucket.symbolized_joined_stacksourcefile

      # TODO(dmikurube): Support shared memory.
      sharedwith = None

      # Explicit comparison against False: mmap=None must match both kinds.
      if self._mmap == False: # (self._mmap == None) should go through.
        return False
      if (self._backtrace_function and
          not self._backtrace_function.match(stackfunction)):
        return False
      if (self._backtrace_sourcefile and
          not self._backtrace_sourcefile.match(stacksourcefile)):
        return False
      if (self._mapped_pathname and
          not self._mapped_pathname.match(unit.region[1]['vma']['name'])):
        return False
      # Permission is matched against the concatenated 'rwxp' flag string.
      if (self._mapped_permission and
          not self._mapped_permission.match(
              unit.region[1]['vma']['readable'] +
              unit.region[1]['vma']['writable'] +
              unit.region[1]['vma']['executable'] +
              unit.region[1]['vma']['private'])):
        return False
      if (self._sharedwith and
          unit.pageframe and sharedwith not in self._sharedwith):
        return False

      return True

    else:
      assert unit.region[0] == 'unhooked'

      # TODO(dmikurube): Support shared memory.
      sharedwith = None

      # Explicit comparison against True: mmap=None must match both kinds.
      if self._mmap == True: # (self._mmap == None) should go through.
        return False
      if (self._mapped_pathname and
          not self._mapped_pathname.match(unit.region[1]['vma']['name'])):
        return False
      if (self._mapped_permission and
          not self._mapped_permission.match(
              unit.region[1]['vma']['readable'] +
              unit.region[1]['vma']['writable'] +
              unit.region[1]['vma']['executable'] +
              unit.region[1]['vma']['private'])):
        return False
      if (self._sharedwith and
          unit.pageframe and sharedwith not in self._sharedwith):
        return False

      return True
class MallocRule(AbstractRule):
  """Represents a Rule to match with malloc'ed blocks.

  Matching criteria (all optional, all must pass): regexes on the symbolized
  allocation backtrace function/sourcefile and on the block's type info.
  """
  def __init__(self, dct):
    super(MallocRule, self).__init__(dct)
    self._backtrace_function = dct.get('backtrace_function', None)
    if self._backtrace_function:
      self._backtrace_function = re.compile(self._backtrace_function)
    self._backtrace_sourcefile = dct.get('backtrace_sourcefile', None)
    if self._backtrace_sourcefile:
      self._backtrace_sourcefile = re.compile(self._backtrace_sourcefile)
    self._typeinfo = dct.get('typeinfo', None)
    if self._typeinfo:
      self._typeinfo = re.compile(self._typeinfo)

  def __repr__(self):
    result = cStringIO.StringIO()
    result.write('%s: ' % self._name)
    attributes = []
    if self._backtrace_function:
      attributes.append('backtrace_function: "%s"' %
                        self._backtrace_function.pattern)
    if self._typeinfo:
      attributes.append('typeinfo: "%s"' % self._typeinfo.pattern)
    result.write('{ %s }' % ', '.join(attributes))
    return result.getvalue()

  def match(self, unit):
    assert unit.bucket.allocator_type == 'malloc'

    stackfunction = unit.bucket.symbolized_joined_stackfunction
    stacksourcefile = unit.bucket.symbolized_joined_stacksourcefile
    typeinfo = unit.bucket.symbolized_typeinfo
    # An unresolved typeinfo still looks like a raw address ('0x...');
    # fall back to the recorded type name in that case.
    if typeinfo.startswith('0x'):
      typeinfo = unit.bucket.typeinfo_name

    return ((not self._backtrace_function or
             self._backtrace_function.match(stackfunction)) and
            (not self._backtrace_sourcefile or
             self._backtrace_sourcefile.match(stacksourcefile)) and
            (not self._typeinfo or self._typeinfo.match(typeinfo)))
class AbstractSorter(object):
  """An abstract class for classifying Units with a set of Rules.

  Built from a JSON dictionary (see AbstractSorter.load) with keys
  'version', 'world' ('vm' or 'malloc'), 'name', 'order', optional 'root',
  and a 'rules' list.  The world decides which concrete Rule class wraps
  each rule entry.
  """
  def __init__(self, dct):
    self._type = 'sorter'
    self._version = dct['version']
    self._world = dct['world']
    self._name = dct['name']
    self._root = dct.get('root', False)
    self._order = dct['order']

    self._rules = []
    for rule in dct['rules']:
      if dct['world'] == 'vm':
        self._rules.append(VMRule(rule))
      elif dct['world'] == 'malloc':
        self._rules.append(MallocRule(rule))
      else:
        LOGGER.error('Unknown sorter world type')

  def __repr__(self):
    result = cStringIO.StringIO()
    print >> result, '%s' % self._name
    print >> result, 'world=%s' % self._world
    print >> result, 'name=%s' % self._name
    print >> result, 'order=%s' % self._order
    print >> result, 'rules:'
    for rule in self._rules:
      print >> result, ' %s' % rule
    return result.getvalue()

  @staticmethod
  def load(filename):
    """Load a sorter JSON file; returns a world-specific Sorter or None."""
    with open(filename) as sorter_f:
      # OrderedDict keeps the rule order given in the JSON file.
      sorter_dict = json.load(sorter_f, object_pairs_hook=OrderedDict)
    if sorter_dict['world'] == 'vm':
      return VMSorter(sorter_dict)
    elif sorter_dict['world'] == 'malloc':
      return MallocSorter(sorter_dict)
    else:
      LOGGER.error('Unknown sorter world type')
      return None

  @property
  def world(self):
    return self._world

  @property
  def name(self):
    return self._name

  @property
  def root(self):
    return self._root

  def iter_rule(self):
    for rule in self._rules:
      yield rule

  def find(self, unit):
    raise NotImplementedError()

  def find_rule(self, name):
    """Finds a rule whose name is |name|. """
    for rule in self._rules:
      if rule.name == name:
        return rule
    return None
class VMSorter(AbstractSorter):
  """Represents a Sorter for memory regions on virtual memory."""
  def __init__(self, dct):
    assert dct['world'] == 'vm'
    super(VMSorter, self).__init__(dct)

  def find(self, unit):
    """Return the first rule matching |unit|, or None if none matches."""
    return next((rule for rule in self._rules if rule.match(unit)), None)
class MallocSorter(AbstractSorter):
  """Represents a Sorter for malloc'ed blocks."""
  def __init__(self, dct):
    assert dct['world'] == 'malloc'
    super(MallocSorter, self).__init__(dct)

  def find(self, unit):
    """Return the first rule matching |unit|; bucket-less units match none."""
    if not unit.bucket:
      return None
    assert unit.bucket.allocator_type == 'malloc'
    # TODO(dmikurube): Utilize component_cache again, or remove it.
    return next((rule for rule in self._rules if rule.match(unit)), None)
class SorterTemplates(object):
  """Represents a template for sorters."""
  def __init__(self, dct):
    self._dict = dct

  def as_dict(self):
    """Return the raw template dictionary."""
    return self._dict

  @staticmethod
  def load(filename):
    """Build a SorterTemplates from a JSON file, preserving key order."""
    with open(filename) as json_f:
      parsed = json.load(json_f, object_pairs_hook=OrderedDict)
    return SorterTemplates(parsed)
class SorterSet(object):
  """Represents an iterable set of Sorters.

  Loads the default sorter JSON files plus any |additional| ones, grouped
  by their world ('vm' / 'malloc'), and the shared output templates.
  """
  def __init__(self, additional=None, default=None):
    if not additional:
      additional = []
    if not default:
      default = DEFAULT_SORTERS
    self._sorters = {}  # maps a world name to a list of its sorters
    LOGGER.info('Loading sorters.')
    for filename in default + additional:
      LOGGER.info(' Loading a sorter "%s".' % filename)
      sorter = AbstractSorter.load(filename)
      if sorter.world not in self._sorters:
        self._sorters[sorter.world] = []
      self._sorters[sorter.world].append(sorter)
    self._templates = SorterTemplates.load(DEFAULT_TEMPLATES)

  def __repr__(self):
    result = cStringIO.StringIO()
    for world, sorters in self._sorters.iteritems():
      for sorter in sorters:
        print >> result, '%s: %s' % (world, sorter)
    return result.getvalue()

  def __iter__(self):
    for sorters in self._sorters.itervalues():
      for sorter in sorters:
        yield sorter

  def iter_world(self, world):
    # Iterate only the sorters of the given world; empty if unknown world.
    for sorter in self._sorters.get(world, []):
      yield sorter

  @property
  def templates(self):
    return self._templates
| bsd-3-clause |
3dfxmadscientist/odoo_vi | openerp/tools/amount_to_text_en.py | 441 | 5103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
# Number words for 0-19, indexed directly by value.
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
          'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
          'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
# Words for the multiples of ten: tens[i] names 20 + 10*i.
tens  = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
# Names of powers of one thousand: denom[i] names 1000**i (index 0 empty).
denom = ( '',
          'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
          'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
          'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
          'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
    """Convert a value < 100 to English."""
    if val < 20:
        return to_19[val]
    for index, word in enumerate(tens):
        # tens[index] covers the half-open range [20 + 10*index, 30 + 10*index).
        if val < 30 + 10 * index:
            remainder = val % 10
            if remainder:
                return word + '-' + to_19[remainder]
            return word
def _convert_nnn(val):
    """Convert a value < 1000 to English.

    Special cased because it is the level that kicks off the < 100 special
    case; calling it directly also yields strings like 'Four Hundred Five'.
    """
    hundreds, remainder = divmod(val, 100)
    parts = []
    if hundreds > 0:
        parts.append(to_19[hundreds] + ' Hundred')
    if remainder > 0:
        parts.append(_convert_nn(remainder))
    return ' '.join(parts)
def english_number(val):
    """Convert a non-negative integer to its English wording.

    Groups of three digits are named via denom ('Thousand', 'Million', ...)
    and joined with ', '.
    """
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    for power in range(len(denom)):
        if 1000 ** power > val:
            # The leading digit group is named by denom[power - 1].
            group = 1000 ** (power - 1)
            leading, rest = divmod(val, group)
            words = _convert_nnn(leading) + ' ' + denom[power - 1]
            if rest > 0:
                words = words + ', ' + english_number(rest)
            return words
def amount_to_text(number, currency):
    """Convert a monetary float to English words followed by the currency name.

    The amount is first formatted to two decimals; integer part and cents are
    worded separately and joined with 'and' plus a 'Cent'/'Cents' label.
    """
    number = '%.2f' % number
    units_name = currency
    # Don't shadow the builtin `list`: name the two halves of the amount.
    integer_part, cents_part = str(number).split('.')
    start_word = english_number(int(integer_part))
    end_word = english_number(int(cents_part))
    cents_number = int(cents_part)
    # Conditional expression instead of the legacy `and/or` hack.
    cents_name = 'Cents' if cents_number > 1 else 'Cent'
    return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------

# Dispatch table mapping a language code to its amount-to-text function.
# Only English is implemented so far.
_translate_funcs = {'en' : amount_to_text}

#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
    """ Converts an integer to its textual representation, using the language set in the context if any.

    Falls back to English when no translation function exists for |lang|.

    Example::

        1654: thousands six cent cinquante-quatre.
    """
    # NOTE(review): this import appears unused; kept in case importing
    # openerp.loglevels has side effects -- candidate for removal.
    import openerp.loglevels as loglevels
#    if nbr > 10000000:
#        _logger.warning(_("Number too large '%d', can not translate it"))
#        return str(nbr)

    # `in` instead of the deprecated (py2-only) dict.has_key().
    if lang not in _translate_funcs:
        _logger.warning(_("no translation function found for lang: '%s'"), lang)
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
from sys import argv
lang = 'nl'
if len(argv) < 2:
for i in range(1,200):
print i, ">>", int_to_text(i, lang)
for i in range(200,999999,139):
print i, ">>", int_to_text(i, lang)
else:
print int_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pfmoore/invoke | invoke/platform.py | 1 | 5500 | """
Platform-specific code lives here.
This is its own module to abstract away what would otherwise be distracting
logic-flow interruptions.
"""
from contextlib import contextmanager
import select
import sys
# TODO: move in here? They're currently platform-agnostic...
from .util import has_fileno, isatty
WINDOWS = (sys.platform == 'win32')
"""
Whether or not the current platform appears to be Windows in nature.

Note that Cygwin's Python is actually close enough to "real" UNIXes that it
doesn't need (or want!) to use PyWin32 -- so we only test for literal Win32
setups (vanilla Python, ActiveState etc) here.
"""

# Platform-specific imports: console/ctypes APIs on Windows, terminal
# ioctl/termios APIs everywhere else.
if WINDOWS:
    import msvcrt
    from ctypes import Structure, c_ushort, windll, POINTER, byref
    from ctypes.wintypes import HANDLE, _COORD, _SMALL_RECT
else:
    import fcntl
    import struct
    import termios
    import tty
def pty_size():
    """
    Determine current local pseudoterminal dimensions.

    :returns:
        A ``(num_cols, num_rows)`` two-tuple describing PTY size. Defaults to
        ``(80, 24)`` if unable to get a sensible result dynamically.
    """
    if WINDOWS:
        cols, rows = _win_pty_size()
    else:
        cols, rows = _pty_size()
    # TODO: make defaults configurable?
    return (cols or 80, rows or 24)
def _pty_size():
    """
    Suitable for most POSIX platforms.

    :returns:
        A ``(num_cols, num_rows)`` two-tuple, either value possibly ``None``
        when the terminal size cannot be determined.
    """
    # Sentinel values to be replaced w/ defaults by caller
    size = (None, None)
    # We want two short unsigned integers (rows, cols)
    fmt = 'HH'
    # Create an empty (zeroed) buffer for ioctl to map onto. Yay for C!
    buf = struct.pack(fmt, 0, 0)
    # Call TIOCGWINSZ to get window size of stdout, returns our filled
    # buffer
    try:
        result = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, buf)
        # Unpack buffer back into Python data types
        # NOTE: this unpack gives us rows x cols, but we return the
        # inverse.
        rows, cols = struct.unpack(fmt, result)
        return (cols, rows)
    # Fallback to emptyish return value in various failure cases:
    # * sys.stdout being monkeypatched, such as in testing, and lacking .fileno
    # * sys.stdout having a .fileno but not actually being attached to a TTY
    # * termios not having a TIOCGWINSZ attribute (happens sometimes...)
    # * other situations where ioctl doesn't explode but the result isn't
    #   something unpack can deal with
    except (struct.error, TypeError, IOError, AttributeError):
        pass
    return size
def _win_pty_size():
    """
    Windows implementation: query the console screen buffer via kernel32.

    :returns:
        A ``(num_cols, num_rows)`` two-tuple, or ``(None, None)`` when the
        console info call fails (e.g. no attached console).
    """
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        # Mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO struct layout.
        _fields_ = [
            ('dwSize', _COORD),
            ('dwCursorPosition', _COORD),
            ('wAttributes', c_ushort),
            ('srWindow', _SMALL_RECT),
            ('dwMaximumWindowSize', _COORD)
        ]

    GetStdHandle = windll.kernel32.GetStdHandle
    GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    GetStdHandle.restype = HANDLE
    GetConsoleScreenBufferInfo.argtypes = [
        HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO)
    ]

    hstd = GetStdHandle(-11)  # STD_OUTPUT_HANDLE = -11
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    ret = GetConsoleScreenBufferInfo(hstd, byref(csbi))

    if ret:
        # Visible window extent, not the full scrollback buffer size.
        sizex = csbi.srWindow.Right - csbi.srWindow.Left + 1
        sizey = csbi.srWindow.Bottom - csbi.srWindow.Top + 1
        return sizex, sizey
    else:
        return (None, None)
@contextmanager
def character_buffered(stream):
    """
    Force local terminal ``stream`` be character, not line, buffered.

    Only applies to Unix-based systems; on Windows (or when ``stream`` is not
    a real TTY) this is a no-op.  The previous termios settings are always
    restored on exit.
    """
    if WINDOWS or not isatty(stream):
        yield
    else:
        old_settings = termios.tcgetattr(stream)
        tty.setcbreak(stream)
        try:
            yield
        finally:
            # Restore the original terminal mode even if the body raised.
            termios.tcsetattr(stream, termios.TCSADRAIN, old_settings)
def ready_for_reading(input_):
    """
    Test ``input_`` to determine whether a read action will succeed.

    :param input_: Input stream object (file-like).

    :returns: ``True`` if a read should succeed, ``False`` otherwise.
    """
    # Objects without a real file descriptor (StringIO, regular files, etc.)
    # are assumed readable in a nonblocking fashion.
    if not has_fileno(input_):
        return True
    # A "real" terminal stdin needs kbhit/select to tell us when a
    # nonblocking read() will succeed.
    if WINDOWS:
        return msvcrt.kbhit()
    readable, _, _ = select.select([input_], [], [], 0.0)
    return bool(readable and readable[0] is input_)
def read_byte(input_):
    """
    Read 1 byte from stdin stream ``input_``.

    :param input_: Input stream object (file-like).

    :returns:
        The read byte (a ``str`` or ``bytes`` depending on Python version.)
    """
    # NOTE: this deliberately delegates to the stream's own read(). An earlier
    # revision used msvcrt.getch() on Windows (which is why this lives in
    # platform.py), but that ignored input_ entirely, returned raw bytes where
    # text streams yield str, and turned special keys / non-ASCII input into
    # invalid control-sequence prefixes.
    # NOTE: there may be dragons here re: what exactly input_ is and what mode
    # it has been opened in.
    return input_.read(1)
| bsd-2-clause |
ity/pants | contrib/android/tests/python/pants_test/contrib/android/test_android_base.py | 14 | 5335 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import textwrap
from contextlib import contextmanager
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import chmod_plus_x, touch
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
from twitter.common.collections import maybe_list
from pants.contrib.android.targets.android_binary import AndroidBinary
from pants.contrib.android.targets.android_library import AndroidLibrary
from pants.contrib.android.targets.android_resources import AndroidResources
from pants.contrib.android.targets.android_target import AndroidTarget
class TestAndroidBase(JvmToolTaskTestBase):
  """Base class for Android tests that provides some mock structures useful for testing.

  :API: public
  """

  @staticmethod
  def android_manifest(package_name=None, target_sdk=None):
    """Return a minimal AndroidManifest.xml string for the given package/SDK.

    :API: public
    """
    package_name = package_name or 'org.pantsbuild.example.hello'
    sdk = target_sdk or 19
    manifest = textwrap.dedent(
      """<?xml version="1.0" encoding="utf-8"?>
      <manifest xmlns:android="http://schemas.android.com/apk/res/android"
          package="{}" >
          <uses-sdk
              android:minSdkVersion="8"
              android:targetSdkVersion="{}" />
      </manifest>
      """.format(package_name, sdk))
    return manifest

  @contextmanager
  def android_target(self, target_name=None, package_name=None, target_sdk=None, dependencies=None,
                     target_type=AndroidTarget, **kwargs):
    """Represent an Android target.

    :API: public
    """
    with temporary_file() as manifest:
      manifest.write(self.android_manifest(package_name=package_name, target_sdk=target_sdk))
      # Close before handing the path to the target so the contents are flushed.
      manifest.close()
      target_name = target_name or 'target'
      deps = dependencies or []
      target = self.make_target(spec=':{}'.format(target_name),
                                target_type=target_type,
                                manifest=manifest.name,
                                dependencies=deps,
                                **kwargs)
      yield target

  @contextmanager
  def android_binary(self, target_name=None, dependencies=None, package_name=None, target_sdk=None):
    """Represent an android_binary target.

    :API: public
    """
    with self.android_target(target_name=target_name or 'binary',
                             dependencies=dependencies,
                             package_name=package_name,
                             target_sdk=target_sdk,
                             target_type=AndroidBinary) as binary:
      yield binary

  @contextmanager
  def android_resources(self, target_name=None, dependencies=None, package_name=None):
    """Represent an android_resources target.

    :API: public
    """
    # Resources need a real directory on disk to point resource_dir at.
    with temporary_dir() as temp:
      with self.android_target(target_name=target_name or 'resources',
                               dependencies=dependencies,
                               resource_dir=temp,
                               package_name=package_name,
                               target_type=AndroidResources) as resources:
        yield resources

  @contextmanager
  def android_library(self, target_name=None, libraries=None, include_patterns=None,
                      exclude_patterns=None, dependencies=None, package_name=None):
    """Represent an android_library target.

    :API: public
    """
    with self.android_target(target_name=target_name or 'library',
                             libraries=libraries,
                             include_patterns=include_patterns,
                             exclude_patterns=exclude_patterns,
                             dependencies=dependencies,
                             package_name=package_name,
                             target_type=AndroidLibrary) as library:
      yield library
@contextmanager
def distribution(installed_sdks=('18', '19'),
                 installed_build_tools=('19.1.0', '20.0.0'),
                 files=('android.jar',),
                 executables=('aapt', 'zipalign')):
  """Mock Android SDK Distribution.

  Creates a throwaway directory laid out like an Android SDK install and yields its path.

  :API: public

  :param tuple[strings] installed_sdks: SDK versions of the files being mocked.
  :param tuple[strings] installed_build_tools: Build tools version of any tools.
  :param tuple[strings] files: The files are to mock non-executables and one will be created for
    each installed_sdks version.
  :param tuple[strings] executables: Executables are any required tools and one is created for
    each installed_build_tools version.
  """
  with temporary_dir() as sdk_root:
    # Fake the per-platform non-executable files (e.g. android.jar).
    for version in installed_sdks:
      platform_dir = os.path.join(sdk_root, 'platforms', 'android-' + version)
      for name in files:
        touch(os.path.join(platform_dir, name))
    # Fake the build-tools executables plus the dx.jar each build-tools version ships.
    for tools_version in installed_build_tools:
      tools_dir = os.path.join(sdk_root, 'build-tools', tools_version)
      for tool in maybe_list(executables or ()):
        tool_path = os.path.join(tools_dir, tool)
        touch(tool_path)
        chmod_plus_x(tool_path)
      touch(os.path.join(tools_dir, 'lib/dx.jar'))
    yield sdk_root
| apache-2.0 |
vincent-tr/rpi-js-os | ext/libcxx-5.0/libcxx/utils/libcxx/sym_check/extract.py | 8 | 6392 | # -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
"""
extract - A set of function that extract symbol lists from shared libraries.
"""
import distutils.spawn
import sys
import re
import libcxx.util
from libcxx.sym_check import util
extract_ignore_names = ['_init', '_fini']
class NMExtractor(object):
    """
    NMExtractor - Extract symbol lists from libraries using nm.
    """

    @staticmethod
    def find_tool():
        """
        Search for the nm executable and return the path.
        """
        return distutils.spawn.find_executable('nm')

    def __init__(self):
        """
        Initialize the nm executable and flags that will be used to extract
        symbols from shared libraries.
        """
        self.nm_exe = self.find_tool()
        if self.nm_exe is None:
            # ERROR no NM found
            print("ERROR: Could not find nm")
            sys.exit(1)
        # -P: POSIX portable output format; -g: external (global) symbols only.
        self.flags = ['-P', '-g']

    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.nm_exe] + self.flags + [lib]
        out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
        if exit_code != 0:
            raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib))
        # Parse each non-empty line of nm output into a symbol dict.
        fmt_syms = (self._extract_sym(l)
                    for l in out.splitlines() if l.strip())
        # Cast symbol to string.
        final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
        # Make unique and sort strings.
        tmp_list = list(sorted(set(final_syms)))
        # Cast string back to symbol.
        return util.read_syms_from_list(tmp_list)

    def _extract_sym(self, sym_str):
        # Parse one `nm -P` line: "name type [value [size]]".
        bits = sym_str.split()
        # Everything we want has at least two columns.
        if len(bits) < 2:
            return None
        new_sym = {
            'name': bits[0],
            'type': bits[1],
            # nm marks undefined symbols with type 'U'/'u'.
            'is_defined': (bits[1].lower() != 'u')
        }
        # Collapse the default-version marker '@@' to a plain '@'.
        new_sym['name'] = new_sym['name'].replace('@@', '@')
        new_sym = self._transform_sym_type(new_sym)
        # NM types which we want to save the size for.
        if new_sym['type'] == 'OBJECT' and len(bits) > 3:
            # Size column is printed in hexadecimal.
            new_sym['size'] = int(bits[3], 16)
        return new_sym

    @staticmethod
    def _want_sym(sym):
        """
        Check that s is a valid symbol that we want to keep.
        """
        if sym is None or len(sym) < 2:
            return False
        if sym['name'] in extract_ignore_names:
            return False
        # Drop local (lowercase) text/bss/rodata/data/weak entries and
        # the linker-provided section boundary markers.
        bad_types = ['t', 'b', 'r', 'd', 'w']
        return (sym['type'] not in bad_types
                and sym['name'] not in ['__bss_start', '_end', '_edata'])

    @staticmethod
    def _transform_sym_type(sym):
        """
        Map the nm single letter output for type to either FUNC or OBJECT.
        If the type is not recognized it is left unchanged.
        """
        func_types = ['T', 'W']
        obj_types = ['B', 'D', 'R', 'V', 'S']
        if sym['type'] in func_types:
            sym['type'] = 'FUNC'
        elif sym['type'] in obj_types:
            sym['type'] = 'OBJECT'
        return sym
class ReadElfExtractor(object):
    """
    ReadElfExtractor - Extract symbol lists from libraries using readelf.
    """

    @staticmethod
    def find_tool():
        """
        Search for the readelf executable and return the path.
        """
        return distutils.spawn.find_executable('readelf')

    def __init__(self):
        """
        Initialize the readelf executable and flags that will be used to
        extract symbols from shared libraries.
        """
        self.tool = self.find_tool()
        if self.tool is None:
            # ERROR no readelf found
            print("ERROR: Could not find readelf")
            sys.exit(1)
        self.flags = ['--wide', '--symbols']

    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.tool] + self.flags + [lib]
        out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
        if exit_code != 0:
            # BUG FIX: this previously interpolated self.nm_exe, an attribute
            # that does not exist on this class, so a readelf failure raised
            # AttributeError instead of the intended RuntimeError.
            raise RuntimeError('Failed to run %s on %s' % (self.tool, lib))
        dyn_syms = self.get_dynsym_table(out)
        return self.process_syms(dyn_syms)

    def process_syms(self, sym_list):
        """
        Parse readelf symbol-table rows into dicts with 'name', 'type',
        'is_defined' and (for OBJECT symbols) 'size' keys.
        """
        new_syms = []
        for s in sym_list:
            parts = s.split()
            if not parts:
                continue
            # Rows have 7 columns when the symbol is unnamed, 8 or 9 when a
            # name (and possibly a version) is present.
            assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9
            if len(parts) == 7:
                continue
            new_sym = {
                'name': parts[7],
                'size': int(parts[2]),
                'type': parts[3],
                # Section index 'UND' marks an undefined (imported) symbol.
                'is_defined': (parts[6] != 'UND')
            }
            assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE']
            if new_sym['name'] in extract_ignore_names:
                continue
            if new_sym['type'] == 'NOTYPE':
                continue
            if new_sym['type'] == 'FUNC':
                # Function sizes are not part of the ABI surface we check.
                del new_sym['size']
            new_syms += [new_sym]
        return new_syms

    def get_dynsym_table(self, out):
        """
        Return the slice of readelf output lines making up the body of the
        '.dynsym' symbol table (banner and column-header rows excluded).
        """
        lines = out.splitlines()
        start = -1
        end = -1
        for i in range(len(lines)):
            if lines[i].startswith("Symbol table '.dynsym'"):
                # Skip the banner line and the column-header row.
                start = i + 2
            if start != -1 and end == -1 and not lines[i].strip():
                # First blank line after the table terminates it.
                end = i + 1
        assert start != -1
        if end == -1:
            end = len(lines)
        return lines[start:end]
def extract_symbols(lib_file):
    """
    Extract and return a list of symbols from a dynamic library.

    readelf is preferred when available; otherwise nm is used. The extracted
    symbols are filtered, formatted and de-duplicated by the extractor.
    """
    have_readelf = ReadElfExtractor.find_tool() is not None
    extractor = ReadElfExtractor() if have_readelf else NMExtractor()
    return extractor.extract(lib_file)
| gpl-3.0 |
praemdonck/micropython | tests/basics/int_big_and3.py | 61 | 2185 | # test - +
print( -97989513389222316022151446562729620153292831887555425160965597396
& 23716683549865351578586448630079789776107310103486834795830390982)
print( -53817081128841898634258263553430908085326601592682411889506742059
& 37042558948907407488299113387826240429667200950043601129661240876)
print( -26167512042587370698808974207700979337713004510730289760097826496
& 98456276326770292376138852628141531773120376436197321310863125849)
print( -21085380307304977067262070503651827226504797285572981274069266136
& 15928222825828272388778130358888206480162413547887287646273147570)
print( -40827393422334167255488276244226338235131323044408420081160772273
& 63815443187857978125545555033672525708399848575557475462799643340)
print( -5181013159871685724135944379095645225188360725917119022722046448
& 59734090450462480092384049604830976376887859531148103803093112493)
print( -283894311
& 86526825689187217371383854139783231460931720533100376593106943447)
print( -40019818573920230246248826511203818792007462193311949166285967147
& 9487909752)
# test + -
print( 97989513389222316022151446562729620153292831887555425160965597396
& -23716683549865351578586448630079789776107310103486834795830390982)
print( 53817081128841898634258263553430908085326601592682411889506742059
& -37042558948907407488299113387826240429667200950043601129661240876)
print( 26167512042587370698808974207700979337713004510730289760097826496
& -98456276326770292376138852628141531773120376436197321310863125849)
print( 21085380307304977067262070503651827226504797285572981274069266136
& -15928222825828272388778130358888206480162413547887287646273147570)
print( 40827393422334167255488276244226338235131323044408420081160772273
& -63815443187857978125545555033672525708399848575557475462799643340)
print( 5181013159871685724135944379095645225188360725917119022722046448
& -59734090450462480092384049604830976376887859531148103803093112493)
print( 283894311
& -86526825689187217371383854139783231460931720533100376593106943447)
print( 40019818573920230246248826511203818792007462193311949166285967147
& -9487909752)
| mit |
resmo/ansible | test/units/modules/cloud/linode_v4/test_linode_v4.py | 59 | 9814 | from __future__ import (absolute_import, division, print_function)
import json
import os
import sys
import pytest
linode_apiv4 = pytest.importorskip('linode_api4')
mandatory_py_version = pytest.mark.skipif(
sys.version_info < (2, 7),
reason='The linode_api4 dependency requires python2.7 or higher'
)
from linode_api4.errors import ApiError as LinodeApiError
from linode_api4 import LinodeClient
from ansible.modules.cloud.linode import linode_v4
from ansible.module_utils.linode import get_user_agent
from units.modules.utils import set_module_args
from units.compat import mock
def test_mandatory_state_is_validated(capfd):
    """The module must refuse to start when 'state' is missing."""
    with pytest.raises(SystemExit):
        set_module_args({'label': 'foo'})
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    assert all(word in payload['msg'] for word in ('state', 'required'))
def test_mandatory_label_is_validated(capfd):
    """The module must refuse to start when 'label' is missing."""
    with pytest.raises(SystemExit):
        set_module_args({'state': 'present'})
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    assert all(word in payload['msg'] for word in ('label', 'required'))
def test_mandatory_access_token_is_validated(default_args,
                                             no_access_token_in_env,
                                             capfd):
    """Without an env var or parameter, a missing access token is fatal."""
    with pytest.raises(SystemExit):
        set_module_args(default_args)
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    assert all(word in payload['msg'] for word in (
        'missing',
        'required',
        'access_token',
    ))
def test_mandatory_access_token_passed_in_env(default_args,
                                              access_token):
    """The access token may be supplied via LINODE_ACCESS_TOKEN."""
    set_module_args(default_args)

    try:
        module = linode_v4.initialise_module()
    except SystemExit:
        pytest.fail("'access_token' is passed in environment")

    assert module.params['access_token'] == os.environ['LINODE_ACCESS_TOKEN']
def test_mandatory_access_token_passed_in_as_parameter(default_args,
                                                       no_access_token_in_env):
    """The access token may be supplied as an explicit module parameter."""
    default_args['access_token'] = 'foo'
    set_module_args(default_args)

    try:
        module = linode_v4.initialise_module()
    except SystemExit:
        pytest.fail("'access_token' is passed in as parameter")

    assert module.params['access_token'] == 'foo'
def test_instance_by_label_cannot_authenticate(capfd, access_token,
                                               default_args):
    """API errors during the label lookup surface as a module failure."""
    set_module_args(default_args)
    module = linode_v4.initialise_module()
    client = LinodeClient(module.params['access_token'])

    patched = 'linode_api4.linode_client.LinodeGroup.instances'
    with mock.patch(patched, side_effect=LinodeApiError('foo')):
        with pytest.raises(SystemExit):
            linode_v4.maybe_instance_from_label(module, client)

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    assert 'Unable to query the Linode API' in payload['msg']
def test_no_instances_found_with_label_gives_none(default_args,
                                                  access_token):
    """An unknown label yields None rather than an error."""
    set_module_args(default_args)
    module = linode_v4.initialise_module()
    client = LinodeClient(module.params['access_token'])

    patched = 'linode_api4.linode_client.LinodeGroup.instances'
    with mock.patch(patched, return_value=[]):
        found = linode_v4.maybe_instance_from_label(module, client)

    assert found is None
def test_optional_region_is_validated(default_args, capfd, access_token):
    """'region' becomes required once 'type' and 'image' are supplied."""
    default_args.update({'type': 'foo', 'image': 'bar'})
    set_module_args(default_args)

    with pytest.raises(SystemExit):
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    for word in ('required', 'together', 'region'):
        assert word in payload['msg']
def test_optional_type_is_validated(default_args, capfd, access_token):
    """'type' becomes required once 'region' and 'image' are supplied."""
    default_args.update({'region': 'foo', 'image': 'bar'})
    set_module_args(default_args)

    with pytest.raises(SystemExit):
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    for word in ('required', 'together', 'type'):
        assert word in payload['msg']
def test_optional_image_is_validated(default_args, capfd, access_token):
    """'image' becomes required once 'type' and 'region' are supplied."""
    default_args.update({'type': 'foo', 'region': 'bar'})
    set_module_args(default_args)

    with pytest.raises(SystemExit):
        linode_v4.initialise_module()

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['failed'] is True
    for word in ('required', 'together', 'image'):
        assert word in payload['msg']
def test_instance_already_created(default_args,
                                  mock_linode,
                                  capfd,
                                  access_token):
    """Creating an instance that already exists is a clean no-op."""
    default_args.update({'type': 'foo', 'region': 'bar', 'image': 'baz'})
    set_module_args(default_args)

    patched = 'linode_api4.linode_client.LinodeGroup.instances'
    with mock.patch(patched, return_value=[mock_linode]):
        with pytest.raises(SystemExit) as exit_info:
            linode_v4.main()

    assert exit_info.value.code == 0

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['changed'] is False
    assert 'root_password' not in payload['instance']
    assert payload['instance']['label'] == mock_linode._raw_json['label']
def test_instance_to_be_created_without_root_pass(default_args,
                                                  mock_linode,
                                                  capfd,
                                                  access_token):
    """When no root_pass is given, the generated one is returned."""
    default_args.update({'type': 'foo', 'region': 'bar', 'image': 'baz'})
    set_module_args(default_args)

    list_target = 'linode_api4.linode_client.LinodeGroup.instances'
    create_target = 'linode_api4.linode_client.LinodeGroup.instance_create'
    with mock.patch(list_target, return_value=[]):
        with pytest.raises(SystemExit) as exit_info:
            with mock.patch(create_target,
                            return_value=(mock_linode, 'passw0rd')):
                linode_v4.main()

    assert exit_info.value.code == 0

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['changed'] is True
    assert payload['instance']['label'] == mock_linode._raw_json['label']
    assert payload['instance']['root_pass'] == 'passw0rd'
def test_instance_to_be_created_with_root_pass(default_args,
                                               mock_linode,
                                               capfd,
                                               access_token):
    """A user-supplied root_pass is never echoed back in the result."""
    default_args.update({
        'type': 'foo',
        'region': 'bar',
        'image': 'baz',
        'root_pass': 'passw0rd',
    })
    set_module_args(default_args)

    list_target = 'linode_api4.linode_client.LinodeGroup.instances'
    create_target = 'linode_api4.linode_client.LinodeGroup.instance_create'
    with mock.patch(list_target, return_value=[]):
        with pytest.raises(SystemExit) as exit_info:
            with mock.patch(create_target, return_value=mock_linode):
                linode_v4.main()

    assert exit_info.value.code == 0

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['changed'] is True
    assert payload['instance']['label'] == mock_linode._raw_json['label']
    assert 'root_pass' not in payload['instance']
def test_instance_to_be_deleted(default_args,
                                mock_linode,
                                capfd,
                                access_token):
    """Deleting an existing instance reports changed=True."""
    default_args.update({'state': 'absent'})
    set_module_args(default_args)

    patched = 'linode_api4.linode_client.LinodeGroup.instances'
    with mock.patch(patched, return_value=[mock_linode]):
        with pytest.raises(SystemExit) as exit_info:
            linode_v4.main()

    assert exit_info.value.code == 0

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['changed'] is True
    assert payload['instance']['label'] == mock_linode._raw_json['label']
def test_instance_already_deleted_no_change(default_args,
                                            mock_linode,
                                            capfd,
                                            access_token):
    """Deleting a non-existent instance is a clean no-op."""
    default_args.update({'state': 'absent'})
    set_module_args(default_args)

    patched = 'linode_api4.linode_client.LinodeGroup.instances'
    with mock.patch(patched, return_value=[]):
        with pytest.raises(SystemExit) as exit_info:
            linode_v4.main()

    assert exit_info.value.code == 0

    stdout, stderr = capfd.readouterr()
    payload = json.loads(stdout)

    assert payload['changed'] is False
    assert payload['instance'] == {}
def test_user_agent_created_properly():
    """The module advertises itself with a versioned user agent."""
    try:
        from ansible.module_utils.ansible_release import (
            __version__ as ansible_version
        )
    except ImportError:
        ansible_version = 'unknown'

    expected = 'Ansible-linode_v4_module/%s' % ansible_version
    assert get_user_agent('linode_v4_module') == expected
| gpl-3.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/bloomsky/__init__.py | 10 | 2243 | """Support for BloomSky weather station."""
from datetime import timedelta
import logging
from aiohttp.hdrs import AUTHORIZATION
import requests
import voluptuous as vol
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Module-level handle to the shared BloomSky API client; populated by setup().
BLOOMSKY = None

# Platforms loaded for each discovered BloomSky station.
BLOOMSKY_TYPE = ['camera', 'binary_sensor', 'sensor']

DOMAIN = 'bloomsky'

# The BloomSky only updates every 5-8 minutes as per the API spec so there's
# no point in polling the API more frequently
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)

# Configuration: only the (mandatory) API key is accepted under the domain.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the BloomSky component."""
    global BLOOMSKY

    api_key = config[DOMAIN][CONF_API_KEY]
    try:
        BLOOMSKY = BloomSky(api_key)
    except RuntimeError:
        # Invalid API key (or other init failure) aborts component setup.
        return False

    for platform in BLOOMSKY_TYPE:
        discovery.load_platform(hass, platform, DOMAIN, {}, config)

    return True
class BloomSky:
    """Handle all communication with the BloomSky API."""

    # API documentation at http://weatherlution.com/bloomsky-api/
    API_URL = 'http://api.bloomsky.com/api/skydata'

    def __init__(self, api_key):
        """Initialize the BloomSky wrapper and load the initial device list."""
        self._api_key = api_key
        self.devices = {}
        _LOGGER.debug("Initial BloomSky device load...")
        self.refresh_devices()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def refresh_devices(self):
        """Use the API to retrieve a list of devices."""
        _LOGGER.debug("Fetching BloomSky update")
        response = requests.get(
            self.API_URL, headers={AUTHORIZATION: self._api_key}, timeout=10)
        status = response.status_code
        if status == 401:
            raise RuntimeError("Invalid API_KEY")
        if status != 200:
            _LOGGER.error("Invalid HTTP response: %s", status)
            return
        # Key the device cache on each device's unique id.
        latest = {device['DeviceID']: device for device in response.json()}
        self.devices.update(latest)
| apache-2.0 |
peiyuwang/pants | src/python/pants/build_graph/intermediate_target_factory.py | 5 | 2572 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from hashlib import sha1
import six
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.address import Address
from pants.util.meta import AbstractClass
def hash_target(address, suffix):
  """Return a stable sha1 hex digest of `address` followed by `suffix`.

  :param string address: Target address being hashed.
  :param string suffix: Disambiguating suffix appended to the hash input.
  :returns: A 40-character hex digest string.
  """
  hasher = sha1()
  # hashlib requires bytes on Python 3 (passing str raises TypeError), so
  # encode explicitly; this is a no-op change for ASCII input on Python 2.
  hasher.update(address.encode('utf-8'))
  hasher.update(suffix.encode('utf-8'))
  return hasher.hexdigest()
class IntermediateTargetFactoryBase(AbstractClass):
  """Convenience factory which constructs an intermediate target with the appropriate attributes."""

  # Class-level registry of (name, rel_path) pairs already synthesized, used to
  # avoid registering the same intermediate target twice.  NB: shared by all
  # subclasses and instances.
  _targets = set()

  class ExpectedAddressError(TargetDefinitionException):
    """Thrown if an object that is not an address is used as the dependency spec."""

  @classmethod
  def reset(cls):
    """Forget all previously created intermediate targets."""
    cls._targets.clear()

  def __init__(self, parse_context):
    """
    :param parse_context: The BUILD-file parse context in which synthetic targets are registered.
    """
    self._parse_context = parse_context

  @property
  def extra_target_arguments(self):
    """Extra keyword arguments to pass to the target constructor."""
    return {}

  def _create_intermediate_target(self, address, suffix):
    """
    :param string address: A target address.
    :param string suffix: A string used as a suffix of the intermediate target name.
    :returns: The address of a synthetic intermediary target.
    """
    if not isinstance(address, six.string_types):
      raise self.ExpectedAddressError("Expected string address argument, got type {type}"
                                      .format(type=type(address)))

    address = Address.parse(address, self._parse_context.rel_path)
    # NB(gmalmquist): Ideally there should be a way to indicate that these targets are synthetic
    # and shouldn't show up in `./pants list` etc, because we really don't want people to write
    # handwritten dependencies on them. For now just give them names containing "-unstable-" as a
    # hint.
    hash_str = hash_target(str(address), suffix)
    name = '{name}-unstable-{suffix}-{index}'.format(
      name=address.target_name,
      suffix=suffix.replace(' ', '.'),
      index=hash_str,
    )

    if (name, self._parse_context.rel_path) not in self._targets:
      # Register the synthetic wrapper target exactly once per (name, rel_path).
      self._parse_context.create_object(
        'target',
        name=name,
        dependencies=[address.spec],
        **self.extra_target_arguments
      )
      self._targets.add((name, self._parse_context.rel_path))
    return ':{}'.format(name)
| apache-2.0 |
projectcalico/calico-nova | nova/api/openstack/compute/plugins/v3/server_groups.py | 6 | 6218 | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Server Group API Extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import server_groups as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-server-groups"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _authorize_context(req):
    """Extract the nova request context and enforce the extension policy."""
    ctxt = req.environ['nova.context']
    authorize(ctxt)
    return ctxt
class ServerGroupController(wsgi.Controller):
    """The Server group API controller for the OpenStack API."""

    def _format_server_group(self, context, group):
        # the id field has its value as the uuid of the server group
        # There is no 'uuid' key in server_group seen by clients.
        # In addition, clients see policies as a ["policy-name"] list;
        # and they see members as a ["server-id"] list.
        server_group = {}
        server_group['id'] = group.uuid
        server_group['name'] = group.name
        server_group['policies'] = group.policies or []
        # NOTE(danms): This has been exposed to the user, but never used.
        # Since we can't remove it, just make sure it's always empty.
        server_group['metadata'] = {}
        members = []
        if group.members:
            # Display the instances that are not deleted.
            filters = {'uuid': group.members, 'deleted': False}
            instances = objects.InstanceList.get_by_filters(
                context, filters=filters)
            members = [instance.uuid for instance in instances]
        server_group['members'] = members
        return server_group

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return data about the given server group."""
        context = _authorize_context(req)
        try:
            sg = objects.InstanceGroup.get_by_uuid(context, id)
        except nova.exception.InstanceGroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        return {'server_group': self._format_server_group(context, sg)}

    @wsgi.response(204)
    @extensions.expected_errors(404)
    def delete(self, req, id):
        """Delete an server group."""
        context = _authorize_context(req)
        try:
            sg = objects.InstanceGroup.get_by_uuid(context, id)
        except nova.exception.InstanceGroupNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        quotas = objects.Quotas()
        project_id, user_id = objects.quotas.ids_from_server_group(context, sg)
        try:
            # We have to add the quota back to the user that created
            # the server group
            quotas.reserve(context, project_id=project_id,
                           user_id=user_id, server_groups=-1)
        except Exception:
            # Quota bookkeeping failure must not block the delete: drop the
            # reservation, log, and carry on.
            quotas = None
            LOG.exception(_LE("Failed to update usages deallocating "
                              "server group"))
        try:
            sg.destroy()
        except nova.exception.InstanceGroupNotFound as e:
            # The group vanished underneath us: undo the reservation (if any)
            # before reporting 404.
            if quotas:
                quotas.rollback()
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        # Destroy succeeded; commit the quota give-back.
        if quotas:
            quotas.commit()

    @extensions.expected_errors(())
    def index(self, req):
        """Returns a list of server groups."""
        context = _authorize_context(req)
        project_id = context.project_id
        if 'all_projects' in req.GET and context.is_admin:
            # Admins may list every project's groups.
            sgs = objects.InstanceGroupList.get_all(context)
        else:
            sgs = objects.InstanceGroupList.get_by_project_id(
                context, project_id)
        # Apply the standard marker/limit pagination before formatting.
        limited_list = common.limited(sgs.objects, req)
        result = [self._format_server_group(context, group)
                  for group in limited_list]
        return {'server_groups': result}

    @extensions.expected_errors((400, 403))
    @validation.schema(schema.create)
    def create(self, req, body):
        """Creates a new server group."""
        context = _authorize_context(req)
        # Reserve quota before creating; rolled back below on failure.
        quotas = objects.Quotas()
        try:
            quotas.reserve(context, project_id=context.project_id,
                           user_id=context.user_id, server_groups=1)
        except nova.exception.OverQuota:
            msg = _("Quota exceeded, too many server groups.")
            raise exc.HTTPForbidden(explanation=msg)
        vals = body['server_group']
        sg = objects.InstanceGroup(context)
        sg.project_id = context.project_id
        sg.user_id = context.user_id
        try:
            sg.name = vals.get('name')
            sg.policies = vals.get('policies')
            sg.create()
        except ValueError as e:
            quotas.rollback()
            raise exc.HTTPBadRequest(explanation=e)
        quotas.commit()
        return {'server_group': self._format_server_group(context, sg)}
class ServerGroups(extensions.V3APIExtensionBase):
    """Server group support."""

    name = "ServerGroups"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the os-server-groups resource with its action endpoint."""
        resource = extensions.ResourceExtension(
            ALIAS, controller=ServerGroupController(),
            member_actions={"action": "POST", })
        return [resource]

    def get_controller_extensions(self):
        """This plugin does not extend any existing controllers."""
        return []
| apache-2.0 |
matthew-brett/pyblio | Pyblio/ConfDir/GnomeUI.py | 2 | 1944 | from Pyblio import Config, Fields
from Pyblio.GnomeUI import Utils, Editor
import gtk
Config.define ('gnomeui/default', """ Graphical description of the
default field. """)
Config.define ('gnomeui/monospaced', """ A monospaced font, for native edition """)
def _text_get ():
    """Return the sorted, lower-cased names of all plain/long text fields."""
    defined = Config.get ('base/fields').data
    names = []
    for field in defined.values():
        if field.type is Fields.Text or field.type is Fields.LongText:
            names.append(field.name.lower())
    names.sort()
    return names
def _on_multiline_select (item, multi, user):
    """Config hook: switch field editors between single- and multi-line widgets."""
    fields = Config.get ('base/fields').data
    for name, wants_multiline in multi.items():
        if name not in fields:
            continue
        if wants_multiline:
            fields[name].widget = Editor.Text
        else:
            fields[name].widget = Editor.Entry
    return True
Config.define ('gnomeui/multiline',
""" Fields displayed in a multi-line widget """,
Config.Dict (Config.Element (_text_get),
Config.Boolean ()),
hook = _on_multiline_select)
# --------------------------------------------------

# Fixed-width font used by the native (raw) entry editor.
Config.set ('gnomeui/monospaced',
            gtk.gdk.Font ('-*-*-*-r-normal-*-*-*-*-*-c-*-iso8859-1'))

h = Config.get ('base/fields').data

# Map the field types to the widgets used to edit them.
Fields.AuthorGroup.widget = Editor.AuthorGroup
Fields.Text.widget = Editor.Entry
Fields.URL.widget = Editor.URL
Fields.Reference.widget = Editor.Reference
Fields.Date.widget = Editor.Date
Fields.Date.justification = gtk.JUSTIFY_RIGHT

# Default column widths (pixels) for the known fields in the index view.
for f, w in (('author', 150),
             ('editor', 150),
             ('title', 200),
             ('booktitle', 200),
             ('date', 50),
             ('-author/editor-', 150),
             ('-author/title-', 250)):
    if not h.has_key (f): continue
    h [f].width = w

Config.set ('gnomeui/default', (150, gtk.JUSTIFY_LEFT, Editor.Entry))

# By default only the abstract (when defined) gets a multi-line editor.
multi = {}
if h.has_key ('abstract'): multi ['abstract'] = 1
Config.set ('gnomeui/multiline', multi)
| gpl-2.0 |
ndardenne/pymatgen | pymatgen/io/abinit/tasks.py | 2 | 166549 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.serializers.json_coders import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"TaskManager",
"AbinitBuild",
"ParalHintsParser",
"ScfTask",
"NscfTask",
"RelaxTask",
"DdkTask",
"PhononTask",
"SigmaTask",
"OpticTask",
"AnaddbTask",
]
import logging
logger = logging.getLogger(__name__)
# Tools and helper functions.
def straceback():
    """Return the current exception traceback formatted as a string."""
    # Imported lazily: this helper is only needed on error paths.
    import traceback
    return traceback.format_exc()
def lennone(PropperOrNone):
    """Return ``len(PropperOrNone)``, treating ``None`` as empty.

    :param PropperOrNone: Any sized object, or None.
    :returns: 0 if the argument is None, otherwise its length.
    """
    # Conditional expression replaces the original if/else block.
    return 0 if PropperOrNone is None else len(PropperOrNone)
def nmltostring(nml):
    """Convert a dictionary representing a Fortran namelist into a string.

    :param nml: Mapping of group name -> {variable name: value}. List/tuple
        values are rendered comma-separated, strings are single-quoted, and
        everything else is rendered with ``str``.
    :returns: One "&group ... /" section per key, each terminated by a newline.
    :raises ValueError: If ``nml`` is not a dict.
    """
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict !")

    # Collect the sections and join once instead of quadratic string
    # concatenation with +=.
    chunks = []
    for key, group in nml.items():
        namelist = ["&" + key]
        for k, v in group.items():
            if isinstance(v, (list, tuple)):
                namelist.append(k + " = " + ",".join(map(str, v)) + ",")
            elif is_string(v):
                namelist.append(k + " = '" + str(v) + "',")
            else:
                namelist.append(k + " = " + str(v) + ",")
        namelist.append("/")
        chunks.append("\n".join(namelist) + "\n")
    return "".join(chunks)
class TaskResults(NodeResults):

    # Extend the parent JSON schema: records also carry the name of the
    # executable that produced them.
    JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
    JSON_SCHEMA["properties"] = {
        "executable": {"type": "string", "required": True},
    }

    @classmethod
    def from_node(cls, task):
        """Initialize an instance from an :class:`AbinitTask` instance."""
        new = super(TaskResults, cls).from_node(task)

        new.update(
            executable=task.executable,
            #executable_version:
            #task_events=
            pseudos=[p.as_dict() for p in task.input.pseudos],
            #input=task.input
        )

        # Attach the main input and output files ("t" = text mode) so they
        # are persisted to GridFS alongside the results.
        new.register_gridfs_files(
            run_abi=(task.input_file.path, "t"),
            run_abo=(task.output_file.path, "t"),
        )

        return new
class ParalConf(AttrDict):
    """
    Parameters associated to one of the possible parallel configurations
    reported by ABINIT.

    Essentially a dictionary whose values can also be accessed as attributes.
    Default values are injected for selected keys that might be missing from
    the ABINIT report.

    Example::

        --- !Autoparal
        info:
            version: 1
            autoparal: 1
            max_ncpus: 108
        configurations:
            - tot_ncpus: 2       # Total number of CPUs
              mpi_ncpus: 2       # Number of MPI processes.
              omp_ncpus: 1       # Number of OMP threads (1 if not present)
              mem_per_cpu: 10    # Estimated memory requirement per MPI processor in Megabytes.
              efficiency: 0.4    # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
              vars: {            # Dictionary with the variables that should be added to the input.
                    varname1: varvalue1
                    varname2: varvalue2
                    }
            -
        ...

    For paral_kgb we have::

        nproc     npkpt    npspinor    npband     npfft    bandpp    weight
           108       1         1          12         9        2        0.25
           108       1         1         108         1        2       27.00
            96       1         1          24         4        1        1.50
            84       1         1          12         7        2        0.25
    """
    # Defaults used when ABINIT does not report the corresponding key.
    _DEFAULTS = {
        "omp_ncpus": 1,
        "mem_per_cpu": 0.0,
        "vars": {}
    }

    def __init__(self, *args, **kwargs):
        super(ParalConf, self).__init__(*args, **kwargs)
        # Fill in the missing entries with their default values.
        for name, default in self._DEFAULTS.items():
            if name not in self:
                self[name] = default

    def __str__(self):
        buf = StringIO()
        pprint(self, stream=buf)
        return buf.getvalue()

    # TODO: Change name in abinit
    # Remove tot_ncpus from Abinit
    @property
    def num_cores(self):
        """Total number of cores: MPI processes x OpenMP threads."""
        return self.mpi_procs * self.omp_threads

    @property
    def mem_per_proc(self):
        """Estimated memory per MPI process (Mb)."""
        return self.mem_per_cpu

    @property
    def mpi_procs(self):
        """Number of MPI processes."""
        return self.mpi_ncpus

    @property
    def omp_threads(self):
        """Number of OpenMP threads."""
        return self.omp_ncpus

    @property
    def speedup(self):
        """Estimated speedup reported by ABINIT."""
        return self.efficiency * self.num_cores

    @property
    def tot_mem(self):
        """Estimated total memory in Mbs (computed from mem_per_proc)"""
        return self.mem_per_proc * self.mpi_procs
class ParalHintsError(Exception):
    """Base error class for `ParalHints`."""
class ParalHintsParser(object):
    """Parses the YAML `!Autoparal` section produced by ABINIT."""

    Error = ParalHintsError

    def __init__(self):
        # Used to push error strings (bounded ring buffer).
        self._errors = collections.deque(maxlen=100)

    def add_error(self, errmsg):
        """Record an error message in the internal ring buffer."""
        self._errors.append(errmsg)

    def parse(self, filename):
        """
        Read the `AutoParal` section (YAML format) from filename.
        Assumes the file contains only one section.

        Raises:
            self.Error: If the YAML document cannot be parsed/interpreted.
        """
        with abiinspect.YamlTokenizer(filename) as r:
            doc = r.next_doc_with_tag("!Autoparal")
            try:
                d = yaml.load(doc.text_notag)
                return ParalHints(info=d["info"], confs=d["configurations"])
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; catch Exception instead.
            except Exception:
                import traceback
                sexc = traceback.format_exc()
                err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
                self.add_error(err_msg)
                logger.critical(err_msg)
                raise self.Error(err_msg)
class ParalHints(collections.Iterable):
    """
    Iterable with the hints for the parallel execution reported by ABINIT.
    """
    Error = ParalHintsError

    def __init__(self, info, confs):
        """
        Args:
            info: Dictionary with the metadata reported in the autoparal section.
            confs: List of dictionaries, one for each parallel configuration.
        """
        self.info = info
        self._confs = [ParalConf(**d) for d in confs]

    @classmethod
    def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
        """
        Build a list of Parallel configurations from two lists
        containing the number of MPI processes and the number of OpenMP threads
        i.e. product(mpi_procs, omp_threads).
        The configuration have parallel efficiency set to 1.0 and no input variables.
        Mainly used for preparing benchmarks.
        """
        info = {}
        # Bug fix: omp_ncpus must be the thread count `t` from the product,
        # not the MPI count `p` (the original passed omp_ncpus=p).
        confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
                 for p, t in product(mpi_procs, omp_threads)]

        return cls(info, confs)

    def __getitem__(self, key):
        return self._confs[key]

    def __iter__(self):
        return self._confs.__iter__()

    def __len__(self):
        return self._confs.__len__()

    def __repr__(self):
        return "\n".join(str(conf) for conf in self)

    def __str__(self):
        return repr(self)

    @lazy_property
    def max_cores(self):
        """Maximum number of cores."""
        return max(c.mpi_procs * c.omp_threads for c in self)

    @lazy_property
    def max_mem_per_proc(self):
        """Maximum memory per MPI process."""
        return max(c.mem_per_proc for c in self)

    @lazy_property
    def max_speedup(self):
        """Maximum speedup."""
        return max(c.speedup for c in self)

    @lazy_property
    def max_efficiency(self):
        """Maximum parallel efficiency."""
        return max(c.efficiency for c in self)

    @pmg_serialize
    def as_dict(self, **kwargs):
        """JSON-serializable dict representation."""
        return {"info": self.info, "confs": self._confs}

    @classmethod
    def from_dict(cls, d):
        """Reconstruct an instance from the dict produced by as_dict."""
        return cls(info=d["info"], confs=d["confs"])

    def copy(self):
        """Shallow copy of self."""
        return copy.copy(self)

    def select_with_condition(self, condition, key=None):
        """
        Remove all the configurations that do not satisfy the given condition.

        Args:
            condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
            key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
                if we have to filter the configurations depending on the values in vars
        """
        condition = Condition.as_condition(condition)
        new_confs = []

        for conf in self:
            # Select the object on which condition is applied
            obj = conf if key is None else AttrDict(conf[key])
            add_it = condition(obj=obj)
            #if key is "vars": print("conf", conf, "added:", add_it)
            if add_it: new_confs.append(conf)

        self._confs = new_confs

    def sort_by_efficiency(self, reverse=True):
        """Sort the configurations in place. items with highest efficiency come first"""
        self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
        return self

    def sort_by_speedup(self, reverse=True):
        """Sort the configurations in place. items with highest speedup come first"""
        self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
        return self

    def sort_by_mem_per_proc(self, reverse=False):
        """Sort the configurations in place. items with lowest memory per proc come first."""
        # Avoid sorting if mem_per_cpu is not available.
        if any(c.mem_per_proc > 0.0 for c in self):
            self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
        return self

    def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
        """
        Return a new :class:`ParalHints` with the optimal configurations,
        obtained by binning the configurations one priority at a time.
        """
        # Mapping property --> options passed to sparse_histogram
        # NOTE(review): mem_per_proc passes `memory=1024` while the other
        # entries use `step` -- confirm SparseHistogram accepts this keyword.
        opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(memory=1024))
        #opts = dict(zip(priorities, bin_widths))

        opt_confs = self._confs
        for priority in priorities:
            histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
            # Lowest memory is best; highest speedup/efficiency is best.
            pos = 0 if priority == "mem_per_proc" else -1
            opt_confs = histogram.values[pos]

        #histogram.plot(show=True, savefig="hello.pdf")
        return self.__class__(info=self.info, confs=opt_confs)

    #def histogram_efficiency(self, step=0.1):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
    #    return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)

    #def histogram_speedup(self, step=1.0):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
    #    return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)

    #def histogram_memory(self, step=1024):
    #    """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
    #    return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)

    #def filter(self, qadapter):
    #    """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
    #    new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
    #    return self.__class__(info=self.info, confs=new_confs)

    def get_ordered_with_policy(self, policy, max_ncpus):
        """
        Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
        """
        # Build new list since we are gonna change the object in place.
        hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])

        # First select the configurations satisfying the condition specified by the user (if any)
        bkp_hints = hints.copy()
        if policy.condition:
            logger.info("Applying condition %s" % str(policy.condition))
            hints.select_with_condition(policy.condition)

            # Undo change if no configuration fullfills the requirements.
            if not hints:
                hints = bkp_hints
                logger.warning("Empty list of configurations after policy.condition")

        # Now filter the configurations depending on the values in vars
        bkp_hints = hints.copy()
        if policy.vars_condition:
            logger.info("Applying vars_condition %s" % str(policy.vars_condition))
            hints.select_with_condition(policy.vars_condition, key="vars")

            # Undo change if no configuration fullfills the requirements.
            if not hints:
                hints = bkp_hints
                logger.warning("Empty list of configurations after policy.vars_condition")

        if len(policy.autoparal_priorities) == 1:
            # Example: hints.sort_by_speedup()
            if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
                getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
            elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
                if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
                    min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
                    hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
                    hints.sort_by_speedup()
        else:
            hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
            if len(hints) == 0: raise ValueError("len(hints) == 0")

        #TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
        #if len(hints) > 1:
        #    hints.select_with_condition(dict(num_cores={"$eq": 1)))

        # Return final (orderded ) list of configurations (best first).
        return hints
class TaskPolicy(object):
    """
    This object stores the parameters used by the :class:`TaskManager` to
    create the submission script and/or to modify the ABINIT variables
    governing the parallel execution. A `TaskPolicy` object contains
    a set of variables that specify the launcher, as well as the options
    and the conditions used to select the optimal configuration for the parallel run
    """
    @classmethod
    def as_policy(cls, obj):
        """
        Converts an object obj into a `:class:`TaskPolicy. Accepts:

            * None (default policy is returned)
            * TaskPolicy (returned as-is)
            * dict-like object (passed as keyword arguments)
        """
        if obj is None:
            # Use default policy.
            return TaskPolicy()
        else:
            if isinstance(obj, cls):
                return obj
            elif isinstance(obj, collections.Mapping):
                return cls(**obj)
            else:
                raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))

    @classmethod
    def autodoc(cls):
        """Return a string documenting the options accepted in the policy section."""
        return """
    autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
    condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
               # DEFAULT: empty i.e. ignored.
    vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
                    # (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
    frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
                    # the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
                    # string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
    precedence: # Under development.
    autoparal_priorities: # Under development.
"""

    def __init__(self, **kwargs):
        """
        See autodoc
        """
        # 0 disables the autoparal feature (DEFAULT: 1 i.e. enabled).
        self.autoparal = kwargs.pop("autoparal", 1)
        # Mongodb-like conditions used to filter the autoparal configurations.
        self.condition = Condition(kwargs.pop("condition", {}))
        self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
        # Either "qadapter" or "autoparal_conf" (checked below).
        self.precedence = kwargs.pop("precedence", "autoparal_conf")
        self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
        #self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiency", "memory"]
        # TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
        self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))

        if kwargs:
            raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))

        # Consistency check.
        if self.precedence not in ("qadapter", "autoparal_conf"):
            raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")

    def __str__(self):
        # Dump the public attributes only.
        lines = []
        app = lines.append
        for k, v in self.__dict__.items():
            if k.startswith("_"): continue
            app("%s: %s" % (k, v))
        return "\n".join(lines)
class ManagerIncreaseError(Exception):
    """
    Exception raised by the manager if a request to increase resources
    (memory, cores, wall time, excluded nodes) failed.
    """
class FixQueueCriticalError(Exception):
    """
    Error raised when a queue-critical error could not be fixed at the task level.
    """
# Global variable used to store the task manager returned by `from_user_config`.
# Module-level cache: the YAML configuration file is parsed only once.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
    """
    A `TaskManager` is responsible for the generation of the job script and the submission
    of the task, as well as for the specification of the parameters passed to the resource manager
    (e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
    A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
    A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
    Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
    """
    YAML_FILE = "manager.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    # Keys accepted in the YAML configuration file.
    ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}

    @classmethod
    def autodoc(cls):
        """Return a string with the documentation of the YAML configuration file."""
        s = """
# TaskManager configuration file (YAML Format)

policy:
    # Dictionary with options used to control the execution of the tasks.

qadapters:
    # List of qadapters objects (mandatory)
    -  # qadapter_1
    -  # qadapter_2

db_connector:
    # Connection to MongoDB database (optional)

batch_adapter:
    # Adapter used to submit flows with batch script. (optional)

##########################################
# Individual entries are documented below:
##########################################

"""
        s += "policy: " + TaskPolicy.autodoc() + "\n"
        s += "qadapter: " + QueueAdapter.autodoc() + "\n"
        #s += "db_connector: " + DBConnector.autodoc()
        return s

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`TaskManager` from the YAML file 'manager.yaml'.
        Search first in the working directory and then in the abipy configuration directory.

        Raises:
            RuntimeError if file is not found.
        """
        global _USER_CONFIG_TASKMANAGER
        # Return the cached manager if the configuration file has already been parsed.
        if _USER_CONFIG_TASKMANAGER is not None:
            return _USER_CONFIG_TASKMANAGER

        # Try in the current directory then in user configuration directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)
        if not os.path.exists(path):
            path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if not os.path.exists(path):
            # Bug fix: the "Cannot locate ..." line was duplicated in the
            # original code, giving four %s placeholders for two arguments so
            # the %-interpolation raised TypeError instead of this RuntimeError.
            raise RuntimeError(colored(
                "\nCannot locate %s neither in current directory nor in %s\n"
                "!!! PLEASE READ THIS: !!!\n"
                "To use abipy to run jobs this file must be present\n"
                "It provides a description of the cluster/computer you are running on\n"
                "Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))

        _USER_CONFIG_TASKMANAGER = cls.from_file(path)
        return _USER_CONFIG_TASKMANAGER

    @classmethod
    def from_file(cls, filename):
        """Read the configuration parameters from the Yaml file filename."""
        try:
            with open(filename, "r") as fh:
                return cls.from_dict(yaml.load(fh))
        except Exception:
            print("Error while reading TaskManager parameters from %s\n" % filename)
            raise

    @classmethod
    def from_string(cls, s):
        """Create an instance from string s containing a YAML dictionary."""
        return cls.from_dict(yaml.load(s))

    @classmethod
    def as_manager(cls, obj):
        """
        Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
        If obj is None, the manager is initialized from the user config file.
        """
        if isinstance(obj, cls): return obj
        if obj is None: return cls.from_user_config()

        if is_string(obj):
            # Either a path to a YAML file or a YAML document in a string.
            if os.path.exists(obj):
                return cls.from_file(obj)
            else:
                return cls.from_string(obj)
        elif isinstance(obj, collections.Mapping):
            return cls.from_dict(obj)
        else:
            raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))

    @classmethod
    def from_dict(cls, d):
        """Create an instance from a dictionary."""
        return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})

    @pmg_serialize
    def as_dict(self):
        """JSON-serializable dict representation (the kwargs passed to __init__)."""
        return self._kwargs

    def __init__(self, **kwargs):
        """
        Args:
            policy:None
            qadapters:List of qadapters in YAML format
            db_connector:Dictionary with data used to connect to the database (optional)
        """
        # Keep a copy of kwargs so that as_dict/deepcopy can rebuild the object.
        self._kwargs = copy.deepcopy(kwargs)

        self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))

        # Initialize database connector (if specified)
        self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))

        # Build list of QAdapters. Neglect entry if priority == 0 or `enabled: no`
        qads = []
        for d in kwargs.pop("qadapters"):
            # Bug fix: skip the adapter only when it is explicitly disabled.
            # The previous test `if d.get("enabled", False): continue` was
            # inverted and discarded the adapters marked `enabled: yes`.
            if not d.get("enabled", True): continue
            qad = make_qadapter(**d)
            if qad.priority > 0:
                qads.append(qad)
            elif qad.priority < 0:
                raise ValueError("qadapter cannot have negative priority:\n %s" % qad)

        if not qads:
            raise ValueError("Received empty list of qadapters")
        #if len(qads) != 1:
        #    raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")

        # Order qadapters according to priority: highest first, consistent with
        # the `qads` docstring and select_qadapter (the original ascending sort
        # put the lowest-priority adapter first).
        qads = sorted(qads, key=lambda q: q.priority, reverse=True)
        priorities = [q.priority for q in qads]
        if len(priorities) != len(set(priorities)):
            raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")

        self._qads, self._qadpos = tuple(qads), 0

        # Initialize the qadapter for batch script submission.
        d = kwargs.pop("batch_adapter", None)
        self.batch_adapter = None
        if d: self.batch_adapter = make_qadapter(**d)
        #print("batch_adapter", self.batch_adapter)

        if kwargs:
            raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))

    def to_shell_manager(self, mpi_procs=1):
        """
        Returns a new `TaskManager` with the same parameters as self but replace the :class:`QueueAdapter`
        with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
        """
        my_kwargs = copy.deepcopy(self._kwargs)
        my_kwargs["policy"] = TaskPolicy(autoparal=0)

        # On BlueGene we need at least two qadapters.
        # One for running jobs on the computing nodes and another one
        # for running small jobs on the fronted. These two qadapters
        # will have different enviroments and different executables.
        # If None of the q-adapters has qtype==shell, we change qtype to shell
        # and we return a new Manager for sequential jobs with the same parameters as self.
        # If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
        # when we build the new Manager.
        has_shell_qad = any(d["queue"]["qtype"] == "shell" for d in my_kwargs["qadapters"])
        if has_shell_qad:
            my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]

        for d in my_kwargs["qadapters"]:
            d["queue"]["qtype"] = "shell"
            d["limits"]["min_cores"] = mpi_procs
            d["limits"]["max_cores"] = mpi_procs

            # If shell_runner is specified, replace mpi_runner with shell_runner
            # in the script used to run jobs on the frontend.
            # On same machines based on Slurm, indeed, mpirun/mpiexec is not available
            # and jobs should be executed with `srun -n4 exec` when running on the computing nodes
            # or with `exec` when running in sequential on the frontend.
            if "job" in d and "shell_runner" in d["job"]:
                shell_runner = d["job"]["shell_runner"]
                if not shell_runner or shell_runner == "None": shell_runner = ""
                d["job"]["mpi_runner"] = shell_runner

        new = self.__class__(**my_kwargs)
        new.set_mpi_procs(mpi_procs)

        return new

    def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
        """
        Return a new `TaskManager` in which autoparal has been disabled.
        The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
        Useful for generating input files for benchmarks.
        """
        new = self.deepcopy()
        new.policy.autoparal = 0
        new.set_mpi_procs(mpi_procs)
        new.set_omp_threads(omp_threads)
        return new

    @property
    def has_queue(self):
        """True if we are submitting jobs via a queue manager."""
        return self.qadapter.QTYPE.lower() != "shell"

    @property
    def qads(self):
        """List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
        return self._qads

    @property
    def qadapter(self):
        """The qadapter used to submit jobs."""
        return self._qads[self._qadpos]

    def select_qadapter(self, pconfs):
        """
        Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
        according to some criterion as well as the :class:`QueueAdapter` to use.

        Args:
            pconfs: :class:`ParalHints` object with the list of parallel configurations

        Returns:
            :class:`ParalConf` object with the `optimal` configuration.
        """
        # Order the list of configurations according to policy.
        policy, max_ncpus = self.policy, self.max_cores
        pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)

        if policy.precedence == "qadapter":
            # Try to run on the qadapter with the highest priority.
            for qadpos, qad in enumerate(self.qads):
                possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]

                if qad.allocation == "nodes":
                    # Select the configuration divisible by nodes if possible.
                    for pconf in possible_pconfs:
                        if pconf.num_cores % qad.hw.cores_per_node == 0:
                            return self._use_qadpos_pconf(qadpos, pconf)

                # Here we select the first one.
                if possible_pconfs:
                    return self._use_qadpos_pconf(qadpos, possible_pconfs[0])

        elif policy.precedence == "autoparal_conf":
            # Try to run on the first pconf irrespectively of the priority of the qadapter.
            for pconf in pconfs:
                for qadpos, qad in enumerate(self.qads):
                    if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
                        continue  # Ignore it. not very clean

                    if qad.can_run_pconf(pconf):
                        return self._use_qadpos_pconf(qadpos, pconf)

        else:
            raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)

        # No qadapter could be found
        raise RuntimeError("Cannot find qadapter for this run!")

    def _use_qadpos_pconf(self, qadpos, pconf):
        """
        This function is called when we have accepted the :class:`ParalConf` pconf.
        Returns pconf
        """
        self._qadpos = qadpos

        # Change the number of MPI/OMP cores.
        self.set_mpi_procs(pconf.mpi_procs)
        if self.has_omp: self.set_omp_threads(pconf.omp_threads)

        # Set memory per proc.
        #FIXME: Fixer may have changed the memory per proc and should not be resetted by ParalConf
        #self.set_mem_per_proc(pconf.mem_per_proc)

        return pconf

    def __str__(self):
        """String representation."""
        lines = []
        app = lines.append
        #app("[Task policy]\n%s" % str(self.policy))

        for i, qad in enumerate(self.qads):
            app("[Qadapter %d]\n%s" % (i, str(qad)))
        app("Qadapter selected: %d" % self._qadpos)

        if self.has_db:
            app("[MongoDB database]:")
            app(str(self.db_connector))

        return "\n".join(lines)

    @property
    def has_db(self):
        """True if we are using MongoDB database"""
        return bool(self.db_connector)

    @property
    def has_omp(self):
        """True if we are using OpenMP parallelization."""
        return self.qadapter.has_omp

    @property
    def num_cores(self):
        """Total number of CPUs used to run the task."""
        return self.qadapter.num_cores

    @property
    def mpi_procs(self):
        """Number of MPI processes."""
        return self.qadapter.mpi_procs

    @property
    def mem_per_proc(self):
        """Memory per MPI process."""
        return self.qadapter.mem_per_proc

    @property
    def omp_threads(self):
        """Number of OpenMP threads"""
        return self.qadapter.omp_threads

    def deepcopy(self):
        """Deep copy of self."""
        return copy.deepcopy(self)

    def set_mpi_procs(self, mpi_procs):
        """Set the number of MPI processes to use."""
        self.qadapter.set_mpi_procs(mpi_procs)

    def set_omp_threads(self, omp_threads):
        """Set the number of OpenMp threads to use."""
        self.qadapter.set_omp_threads(omp_threads)

    def set_mem_per_proc(self, mem_mb):
        """Set the memory (in Megabytes) per CPU."""
        self.qadapter.set_mem_per_proc(mem_mb)

    @property
    def max_cores(self):
        """
        Maximum number of cores that can be used.
        This value is mainly used in the autoparal part to get the list of possible configurations.
        """
        return max(q.hint_cores for q in self.qads)

    def get_njobs_in_queue(self, username=None):
        """
        returns the number of jobs in the queue,
        returns None when the number of jobs cannot be determined.

        Args:
            username: (str) the username of the jobs to count (default is to autodetect)
        """
        return self.qadapter.get_njobs_in_queue(username=username)

    def cancel(self, job_id):
        """Cancel the job. Returns exit status."""
        return self.qadapter.cancel(job_id)

    def write_jobfile(self, task, **kwargs):
        """
        Write the submission script. Return the path of the script

        ================  ============================================
        kwargs            Meaning
        ================  ============================================
        exec_args         List of arguments passed to task.executable.
                          Default: no arguments.
        ================  ============================================
        """
        script = self.qadapter.get_script_str(
            job_name=task.name,
            launch_dir=task.workdir,
            executable=task.executable,
            qout_path=task.qout_file.path,
            qerr_path=task.qerr_file.path,
            stdin=task.files_file.path,
            stdout=task.log_file.path,
            stderr=task.stderr_file.path,
            exec_args=kwargs.pop("exec_args", []),
        )

        # Write the script and make it executable by the owner/group.
        with open(task.job_file.path, "w") as fh:
            fh.write(script)
        task.job_file.chmod(0o740)
        return task.job_file.path

    def launch(self, task, **kwargs):
        """
        Build the input files and submit the task via the :class:`Qadapter`

        Args:
            task: :class:`TaskObject`

        Returns:
            Process object.
        """
        if task.status == task.S_LOCKED:
            raise ValueError("You shall not submit a locked task!")

        # Build the task
        task.build()

        # Pass information on the time limit to Abinit (we always assume ndtset == 1)
        #if False and isinstance(task, AbinitTask):
        if isinstance(task, AbinitTask):
            args = kwargs.get("exec_args", [])
            if args is None: args = []
            # Copy before appending so we never mutate the caller's list.
            args = args[:]
            args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
            kwargs["exec_args"] = args
            logger.info("Will pass timelimit option to abinit %s:" % args)

        # Write the submission script
        script_file = self.write_jobfile(task, **kwargs)

        # Submit the task and save the queue id.
        try:
            qjob, process = self.qadapter.submit_to_queue(script_file)
            task.set_status(task.S_SUB, msg='submitted to queue')
            task.set_qjob(qjob)
            return process
        except self.qadapter.MaxNumLaunchesError as exc:
            # TODO: Here we should try to switch to another qadapter
            # 1) Find a new parallel configuration in those stored in task.pconfs
            # 2) Change the input file.
            # 3) Regenerate the submission script
            # 4) Relaunch
            task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
            raise

    def get_collection(self, **kwargs):
        """Return the MongoDB collection used to store the results."""
        return self.db_connector.get_collection(**kwargs)

    def increase_mem(self):
        """Increase the memory per process. Raises ManagerIncreaseError on failure."""
        # OLD
        # with GW calculations in mind with GW mem = 10,
        # the response fuction is in memory and not distributed
        # we need to increase memory if jobs fail ...
        # return self.qadapter.more_mem_per_proc()
        try:
            self.qadapter.more_mem_per_proc()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase mem')

    def increase_ncpus(self):
        """
        increase the number of cpus, first ask the current quadapter, if that one raises a QadapterIncreaseError
        switch to the next qadapter. If all fail raise an ManagerIncreaseError
        """
        try:
            self.qadapter.more_cores()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase ncpu')

    def increase_resources(self):
        """Try to increase the cores first, then the memory. Raises ManagerIncreaseError if both fail."""
        try:
            self.qadapter.more_cores()
            return
        except QueueAdapterError:
            pass

        try:
            self.qadapter.more_mem_per_proc()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase resources')

    def exclude_nodes(self, nodes):
        """Exclude the given nodes from the next submission. Raises ManagerIncreaseError on failure."""
        try:
            self.qadapter.exclude_nodes(nodes=nodes)
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to exclude nodes')

    def increase_time(self):
        """Increase the wall time. Raises ManagerIncreaseError on failure."""
        try:
            self.qadapter.more_time()
        except QueueAdapterError:
            # here we should try to switch to an other qadapter
            raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
    """
    This object stores information on the options used to build Abinit

    .. attribute:: info
        String with build information as produced by `abinit -b`

    .. attribute:: version
        Abinit version number e.g 8.0.1 (string), or None if it could not be parsed.

    .. attribute:: has_netcdf
        True if netcdf is enabled.

    .. attribute:: has_etsfio
        True if etsf-io is enabled.

    .. attribute:: has_omp
        True if OpenMP is enabled.

    .. attribute:: has_mpi
        True if MPI is enabled.

    .. attribute:: has_mpiio
        True if MPI-IO is supported.
    """
    def __init__(self, workdir=None, manager=None):
        """
        Args:
            workdir: Scratch directory in which `abinit -b` is executed.
                A temporary directory is created if not given.
            manager: Object convertible to :class:`TaskManager` with as_manager.
                The user configuration is used if not given.
        """
        manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)

        # Build a simple manager to run the job in a shell subprocess
        import tempfile
        workdir = tempfile.mkdtemp() if workdir is None else workdir

        # Generate a shell script to execute `abinit -b`
        stdout = os.path.join(workdir, "run.abo")
        script = manager.qadapter.get_script_str(
            job_name="abinit_b",
            launch_dir=workdir,
            executable="abinit",
            qout_path=os.path.join(workdir, "queue.qout"),
            qerr_path=os.path.join(workdir, "queue.qerr"),
            #stdin=os.path.join(workdir, "run.files"),
            stdout=stdout,
            stderr=os.path.join(workdir, "run.err"),
            exec_args=["-b"],
        )

        # Execute the script.
        script_file = os.path.join(workdir, "job.sh")
        with open(script_file, "wt") as fh:
            fh.write(script)
        qjob, process = manager.qadapter.submit_to_queue(script_file)
        process.wait()

        if process.returncode != 0:
            logger.critical("Error while executing %s" % script_file)

        with open(stdout, "r") as fh:
            self.info = fh.read()

        # info string has the following format.
        """
        === Build Information ===
         Version       : 8.0.1
         Build target  : x86_64_darwin15.0.0_gnu5.3
         Build date    : 20160122

        === Compiler Suite ===
         C compiler       : gnu
         C++ compiler     : gnuApple
         Fortran compiler : gnu5.3
         CFLAGS           :  -g -O2 -mtune=native -march=native
         CXXFLAGS         :  -g -O2 -mtune=native -march=native
         FCFLAGS          :  -g -ffree-line-length-none
         FC_LDFLAGS       :

        === Optimizations ===
         Debug level        : basic
         Optimization level : standard
         Architecture       : unknown_unknown

        === Multicore ===
         Parallel build : yes
         Parallel I/O   : yes
         openMP support : no
         GPU support    : no

        === Connectors / Fallbacks ===
         Connectors on : yes
         Fallbacks on  : yes
         DFT flavor    : libxc-fallback+atompaw-fallback+wannier90-fallback
         FFT flavor    : none
         LINALG flavor : netlib
         MATH flavor   : none
         TIMER flavor  : abinit
         TRIO flavor   : netcdf+etsf_io-fallback

        === Experimental features ===
         Bindings            : @enable_bindings@
         Exports             : no
         GW double-precision : yes

        === Bazaar branch information ===
         Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
         Revision  : 1226
         Committed : 0
        """
        # Robustness fix: initialize version before parsing. Previously
        # self.version was only assigned inside the loop, so a missing
        # "Version" line (e.g. failed `abinit -b`) made __str__ raise
        # AttributeError.
        self.version = None
        self.has_netcdf = False
        self.has_etsfio = False
        self.has_omp = False
        self.has_mpi, self.has_mpiio = False, False

        def yesno2bool(line):
            # Map the trailing "yes"/"no" token to True/False.
            ans = line.split()[-1]
            return dict(yes=True, no=False)[ans]

        # Parse info.
        for line in self.info.splitlines():
            if "Version" in line: self.version = line.split()[-1]
            if "TRIO flavor" in line:
                self.has_netcdf = "netcdf" in line
                self.has_etsfio = "etsf_io" in line
            if "openMP support" in line: self.has_omp = yesno2bool(line)
            if "Parallel build" in line: self.has_mpi = yesno2bool(line)
            if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)

    def __str__(self):
        lines = []
        app = lines.append
        app("Abinit Build Information:")
        app("    Abinit version: %s" % self.version)
        app("    MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
        app("    Netcdf: %s, ETSF-IO: %s" % (self.has_netcdf, self.has_etsfio))
        return "\n".join(lines)
class FakeProcess(object):
    """
    Stand-in for a ``subprocess.Popen`` object, attached to a :class:`Task`
    instance when the task has not been submitted yet.
    It simulates a process that is still running so that client code can
    safely poll ``task.process``; every other process operation is
    meaningless for a never-started task and raises ``RuntimeError``.
    """

    def poll(self):
        # A still-running process reports None from poll().
        return None

    def wait(self):
        raise RuntimeError("Cannot wait a FakeProcess")

    def communicate(self, input=None):
        raise RuntimeError("Cannot communicate with a FakeProcess")

    def kill(self):
        raise RuntimeError("Cannot kill a FakeProcess")

    @property
    def returncode(self):
        # Nothing has run, hence there is no return code yet.
        return None
class MyTimedelta(datetime.timedelta):
    """A customized version of timedelta whose __str__ method doesn't print microseconds."""

    def __new__(cls, days, seconds, microseconds):
        return datetime.timedelta.__new__(cls, days, seconds, microseconds)

    def __str__(self):
        """Same as timedelta.__str__ but with the fractional-seconds part chopped off."""
        text = super(MyTimedelta, self).__str__()
        dot = text.find(".")
        return text if dot == -1 else text[:dot]

    @classmethod
    def as_timedelta(cls, delta):
        """Convert delta into a MyTimedelta object."""
        # timedelta instances are immutable: we cannot monkey-patch __class__,
        # so we rebuild the object through __new__ instead.
        if isinstance(delta, cls):
            return delta
        return cls(delta.days, delta.seconds, delta.microseconds)
class TaskDateTimes(object):
    """
    Small object containing useful :class:`datetime.datetime` objects associated to important events.

    .. attributes:

        init: initialization datetime
        submission: submission datetime
        start: Begin of execution.
        end: End of execution.
    """
    def __init__(self):
        self.init = datetime.datetime.now()
        self.submission, self.start, self.end = None, None, None

    def __str__(self):
        """Multi-line summary listing only the events that have occurred."""
        lines = []
        app = lines.append

        app("Initialization done on: %s" % self.init)
        if self.submission is not None: app("Submitted on: %s" % self.submission)
        if self.start is not None: app("Started on: %s" % self.start)
        if self.end is not None: app("Completed on: %s" % self.end)

        return "\n".join(lines)

    def reset(self):
        """Reinitialize the counters."""
        # BUG FIX: the previous implementation did `self = self.__class__()`,
        # which only rebinds the *local* name `self` and leaves the instance
        # untouched. Re-run __init__ on the instance to actually reset it.
        self.__init__()

    def get_runtime(self):
        """:class:`timedelta` with the run-time, None if the Task is not running"""
        if self.start is None: return None
        if self.end is None:
            delta = datetime.datetime.now() - self.start
        else:
            delta = self.end - self.start

        return MyTimedelta.as_timedelta(delta)

    def get_time_inqueue(self):
        """
        :class:`timedelta` with the time spent in the Queue, None if the Task is not running

        .. note:

            This value is always greater than the real value computed by the resource manager
            as we start to count only when check_status sets the `Task` status to S_RUN.
        """
        if self.submission is None: return None

        if self.start is None:
            delta = datetime.datetime.now() - self.submission
        else:
            delta = self.start - self.submission
            # This happens when we read the exact start datetime from the ABINIT log file.
            if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)

        return MyTimedelta.as_timedelta(delta)
class TaskError(NodeError):
    """Base Exception for :class:`Task` methods. Exposed on Task as ``Task.Error``."""


class TaskRestartError(TaskError):
    """Exception raised while trying to restart the :class:`Task`. Exposed as ``Task.RestartError``."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
    """A Task is a node that performs some kind of calculation."""
    # Use class attributes for TaskErrors so that we don't have to import them.
    Error = TaskError
    RestartError = TaskRestartError

    # List of `AbinitEvent` subclasses that are tested in the check_status method.
    # Subclasses should provide their own list if they need to check the converge status.
    CRITICAL_EVENTS = []

    # Prefixes for Abinit (input, output, temporary) files.
    Prefix = collections.namedtuple("Prefix", "idata odata tdata")
    pj = os.path.join

    prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
    # Remove the temporary helpers from the class namespace.
    del Prefix, pj
def __init__(self, input, workdir=None, manager=None, deps=None):
    """
    Args:
        input: :class:`AbinitInput` object.
        workdir: Path to the working directory.
        manager: :class:`TaskManager` object.
        deps: Dictionary specifying the dependency of this node.
              None means that this Task has no dependency.
    """
    # Init the node
    super(Task, self).__init__()

    self._input = input

    if workdir is not None:
        self.set_workdir(workdir)

    if manager is not None:
        self.set_manager(manager)

    # Handle possible dependencies.
    if deps:
        self.add_deps(deps)

    # Date-time associated to submission, start and end.
    self.datetimes = TaskDateTimes()

    # Count the number of restarts.
    self.num_restarts = 0

    self._qjob = None
    self.queue_errors = []
    self.abi_errors = []

    # Two flags that provide, dynamically, information on the scaling behaviour of a task.
    # If any fixing process finds non-scaling behaviour, or the task type is clearly
    # not scaling, they should be switched off.
    self.mem_scales = True
    self.load_scales = True
def __getstate__(self):
    """
    Return state is pickled as the contents for the instance.

    In this case we just remove the process since Subprocess objects cannot be pickled.
    This is the reason why we have to store the returncode in self._returncode instead
    of using self.process.returncode.
    """
    state = dict(self.__dict__)
    state.pop("_process", None)
    return state
#@check_spectator
def set_workdir(self, workdir, chroot=False):
    """
    Set the working directory. Cannot be set more than once unless chroot is True.

    Raises:
        ValueError: if workdir was already set to a different path and chroot is False.
    """
    if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
        raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
    self.workdir = os.path.abspath(workdir)

    # Files required for the execution.
    self.input_file = File(os.path.join(self.workdir, "run.abi"))
    self.output_file = File(os.path.join(self.workdir, "run.abo"))
    self.files_file = File(os.path.join(self.workdir, "run.files"))
    self.job_file = File(os.path.join(self.workdir, "job.sh"))
    self.log_file = File(os.path.join(self.workdir, "run.log"))
    self.stderr_file = File(os.path.join(self.workdir, "run.err"))
    self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))

    # This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
    self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))

    # Directories with input|output|temporary data.
    self.indir = Directory(os.path.join(self.workdir, "indata"))
    self.outdir = Directory(os.path.join(self.workdir, "outdata"))
    self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))

    # stderr and output file of the queue manager. Note extensions.
    self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
    self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
def set_manager(self, manager):
    """Set the :class:`TaskManager` used to launch the Task."""
    # Deep copy so that each task can tweak its own manager independently.
    self.manager = manager.deepcopy()

@property
def work(self):
    """The :class:`Work` containing this `Task`."""
    return self._work

def set_work(self, work):
    """Set the :class:`Work` associated to this `Task`. Can only be set once."""
    if not hasattr(self, "_work"):
        self._work = work
    else:
        # The owning Work cannot change once assigned.
        if self._work != work:
            raise ValueError("self._work != work")

@property
def flow(self):
    """The :class:`Flow` containing this `Task`."""
    return self.work.flow
@lazy_property
def pos(self):
    """The position of the task in the :class:`Flow`, as the tuple (work_index, task_index)."""
    for i, task in enumerate(self.work):
        if self == task:
            return self.work.pos, i
    raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))

@property
def pos_str(self):
    """String representation of self.pos, e.g. ``w0_t1``."""
    return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])

@property
def num_launches(self):
    """
    Number of launches performed. This number includes both possible ABINIT restarts
    as well as possible launches done due to errors encountered with the resource manager
    or the hardware/software."""
    return sum(q.num_launches for q in self.manager.qads)
@property
def input(self):
    """AbinitInput object."""
    return self._input

def get_inpvar(self, varname, default=None):
    """Return the value of the ABINIT variable varname, default if not present."""
    return self.input.get(varname, default)

@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
    # Deprecated alias kept for backward compatibility; delegates to set_vars.
    return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
    """
    Set the values of the ABINIT variables in the input file. Return dict with old values.
    """
    kwargs.update(dict(*args))
    # Snapshot the previous values before overwriting them.
    old_values = {}
    for vname in kwargs:
        old_values[vname] = self.input.get(vname)

    self.input.set_vars(**kwargs)

    if kwargs or old_values:
        self.history.info("Setting input variables: %s" % str(kwargs))
        self.history.info("Old values: %s" % str(old_values))

    return old_values
@property
def initial_structure(self):
    """Initial structure of the task."""
    return self.input.structure

def make_input(self, with_header=False):
    """Construct the input file of the calculation. If with_header, prepend str(self)."""
    s = str(self.input)
    if with_header: s = str(self) + "\n" + s
    return s

def ipath_from_ext(self, ext):
    """
    Returns the path of the input file with extension ext.
    Use it when the file does not exist yet.
    """
    return os.path.join(self.workdir, self.prefix.idata + "_" + ext)

def opath_from_ext(self, ext):
    """
    Returns the path of the output file with extension ext.
    Use it when the file does not exist yet.
    """
    return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
@abc.abstractproperty
def executable(self):
    """
    Path to the executable associated to the task (internally stored in self._executable).
    """

def set_executable(self, executable):
    """Set the executable associated to this task."""
    self._executable = executable

@property
def process(self):
    """The process object; a :class:`FakeProcess` if the task has not been submitted yet."""
    try:
        return self._process
    except AttributeError:
        # Attach a fake process so that we can poll it.
        return FakeProcess()
@property
def is_completed(self):
    """True if the task has been executed."""
    return self.status >= self.S_DONE

@property
def can_run(self):
    """The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
    all_ok = all(stat == self.S_OK for stat in self.deps_status)
    return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
#@check_spectator
def cancel(self):
    """Cancel the job. Returns 1 if job was cancelled."""
    # Nothing to cancel if the task was never queued or has already finished.
    if self.queue_id is None:
        return 0
    if self.status >= self.S_DONE:
        return 0

    exit_status = self.manager.cancel(self.queue_id)
    if exit_status != 0:
        logger.warning("manager.cancel returned exit_status: %s" % exit_status)
        return 0

    # Remove output files and reset the status.
    self.history.info("Job %s cancelled by user" % self.queue_id)
    self.reset()
    return 1
def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
    """
    Disable autoparal and force execution with `mpi_procs` MPI processes
    and `omp_threads` OpenMP threads. Useful for generating benchmarks.
    """
    # Fall back to the flow's manager if this task does not have one of its own.
    if hasattr(self, "manager"):
        base_manager = self.manager
    else:
        base_manager = self.flow.manager
    self.manager = base_manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
#@check_spectator
def _on_done(self):
    # Callback executed when the task reaches S_DONE: normalize output file names.
    self.fix_ofiles()

#@check_spectator
def _on_ok(self):
    # Callback executed when the task reaches S_OK.
    # Fix output file names.
    self.fix_ofiles()
    # Get results
    results = self.on_ok()
    self.finalized = True
    return results
#@check_spectator
def on_ok(self):
    """
    This method is called once the `Task` has reached status S_OK.
    Subclasses should provide their own implementation

    Returns:
        Dictionary that must contain at least the following entries:
            returncode:
                0 on success.
            message:
                a string that should provide a human-readable description of what has been performed.
    """
    return {"returncode": 0, "message": "Calling on_all_ok of the base class!"}
#@check_spectator
def fix_ofiles(self):
    """
    This method is called when the task reaches S_OK.
    It changes the extension of particular output files
    produced by Abinit so that the 'official' extension
    is preserved e.g. out_1WF14 --> out_1WF
    """
    filepaths = self.outdir.list_filepaths()
    logger.info("in fix_ofiles with filepaths %s" % list(filepaths))

    # FilepathFixer maps each non-canonical name to its canonical form.
    old2new = FilepathFixer().fix_paths(filepaths)

    for old, new in old2new.items():
        self.history.info("will rename old %s to new %s" % (old, new))
        os.rename(old, new)
#@check_spectator
def _restart(self, submit=True):
    """
    Called by restart once we have finished preparing the task for restarting.

    Args:
        submit: If False, the task is only marked as restarted, not relaunched.

    Return:
        True if task has been restarted
    """
    self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())

    # Increase the counter.
    self.num_restarts += 1
    self.history.info("Restarted, num_restarts %d" % self.num_restarts)

    # Reset datetimes
    self.datetimes.reset()

    if submit:
        # Remove the lock file
        self.start_lockfile.remove()
        # Relaunch the task.
        fired = self.start()
        if not fired: self.history.warning("Restart failed")
    else:
        fired = False

    return fired
#@check_spectator
def restart(self):
    """
    Restart the calculation. Subclasses should provide a concrete version that
    performs all the actions needed for preparing the restart and then calls self._restart
    to restart the task. The default implementation is empty.

    Returns:
        1 if job was restarted, 0 otherwise.
    """
    logger.debug("Calling the **empty** restart method of the base class")
    return 0
def poll(self):
    """Check if child process has terminated. Set and return returncode attribute."""
    self._returncode = self.process.poll()

    if self._returncode is not None:
        self.set_status(self.S_DONE, "status set to Done")

    return self._returncode

def wait(self):
    """Wait for child process to terminate. Set and return returncode attribute."""
    self._returncode = self.process.wait()
    self.set_status(self.S_DONE, "status set to Done")

    return self._returncode

def communicate(self, input=None):
    """
    Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
    Wait for process to terminate. The optional input argument should be a string to be sent to the
    child process, or None, if no data should be sent to the child.

    communicate() returns a tuple (stdoutdata, stderrdata).
    """
    stdoutdata, stderrdata = self.process.communicate(input=input)
    self._returncode = self.process.returncode
    self.set_status(self.S_DONE, "status set to Done")

    return stdoutdata, stderrdata
def kill(self):
    """Kill the child."""
    self.process.kill()
    self.set_status(self.S_ERROR, "status set to Error by task.kill")
    self._returncode = self.process.returncode

@property
def returncode(self):
    """
    The child return code, set by poll() and wait() (and indirectly by communicate()).
    A None value indicates that the process hasn't terminated yet.
    A negative value -N indicates that the child was terminated by signal N (Unix only).
    """
    try:
        return self._returncode
    except AttributeError:
        # No poll/wait has been performed yet.
        return 0
def reset(self):
    """
    Reset the task status. Mainly used if we made a silly mistake in the initial
    setup of the queue manager and we want to fix it and rerun the task.

    Returns:
        0 on success, 1 if reset failed.
    """
    # Can only reset tasks that are done.
    # One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
    # and we want to restart them)
    if self.status != self.S_SUB and self.status < self.S_DONE: return 1

    # Remove output files otherwise the EventParser will think the job is still running
    self.output_file.remove()
    self.log_file.remove()
    self.stderr_file.remove()
    self.start_lockfile.remove()
    self.qerr_file.remove()
    self.qout_file.remove()

    self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
    self.set_qjob(None)
    return 0
@property
@return_none_if_raise(AttributeError)
def queue_id(self):
    """Queue identifier returned by the Queue manager. None if not set"""
    return self.qjob.qid

@property
@return_none_if_raise(AttributeError)
def qname(self):
    """Queue name identifier returned by the Queue manager. None if not set"""
    return self.qjob.qname

@property
def qjob(self):
    """The queue-job object set after submission, None otherwise."""
    return self._qjob

def set_qjob(self, qjob):
    """Set info on queue after submission."""
    self._qjob = qjob

@property
def has_queue(self):
    """True if we are submitting jobs via a queue manager."""
    return self.manager.qadapter.QTYPE.lower() != "shell"
@property
def num_cores(self):
    """Total number of CPUs used to run the task."""
    return self.manager.num_cores

@property
def mpi_procs(self):
    """Number of CPUs used for MPI."""
    return self.manager.mpi_procs

@property
def omp_threads(self):
    """Number of CPUs used for OpenMP."""
    return self.manager.omp_threads

@property
def mem_per_proc(self):
    """Memory per MPI process."""
    return Memory(self.manager.mem_per_proc, "Mb")

@property
def status(self):
    """Gives the status of the task."""
    return self._status
def lock(self, source_node):
    """
    Lock the task, source is the :class:`Node` that applies the lock.

    Raises:
        ValueError: if the task is not in the S_INIT state.
    """
    if self.status != self.S_INIT:
        raise ValueError("Trying to lock a task with status %s" % self.status)

    self._status = self.S_LOCKED
    self.history.info("Locked by node %s", source_node)

def unlock(self, source_node, check_status=True):
    """
    Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
    source_node is the :class:`Node` that removed the lock
    Call task.check_status if check_status is True.

    Raises:
        RuntimeError: if the task is not in the S_LOCKED state.
    """
    if self.status != self.S_LOCKED:
        raise RuntimeError("Trying to unlock a task with status %s" % self.status)

    self._status = self.S_READY
    if check_status: self.check_status()
    self.history.info("Unlocked by %s", source_node)
#@check_spectator
def set_status(self, status, msg):
    """
    Set and return the status of the task.

    Args:
        status: Status object or string representation of the status
        msg: string with human-readable message used in the case of errors.

    Returns:
        The new :class:`Status`.

    Raises:
        RuntimeError: if the task is locked or the new status is S_LOCKED
            (locked tasks must be unlocked explicitly via ``unlock``).
    """
    # truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
    if len(msg) > 2000:
        msg = msg[:2000]
        msg += "\n... snip ...\n"

    # Locked files must be explicitly unlocked
    if self.status == self.S_LOCKED or status == self.S_LOCKED:
        err_msg = (
             "Locked files must be explicitly unlocked before calling set_status but\n"
             "task.status = %s, input status = %s" % (self.status, status))
        raise RuntimeError(err_msg)

    status = Status.as_status(status)

    changed = True
    if hasattr(self, "_status"):
        changed = (status != self._status)

    self._status = status

    if status == self.S_RUN:
        # Set datetimes.start when the task enters S_RUN
        if self.datetimes.start is None:
            self.datetimes.start = datetime.datetime.now()

    # Add new entry to history only if the status has changed.
    if changed:
        if status == self.S_SUB:
            self.datetimes.submission = datetime.datetime.now()
            self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
                self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))

        elif status == self.S_OK:
            self.history.info("Task completed %s", msg)

        elif status == self.S_ABICRITICAL:
            self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)

        else:
            self.history.info("Status changed to %s. msg: %s", status, msg)

    #######################################################
    # The section belows contains callbacks that should not
    # be executed if we are in spectator_mode
    #######################################################
    if status == self.S_DONE:
        # Execute the callback
        self._on_done()

    if status == self.S_OK:
        # Finalize the task.
        if not self.finalized:
            self._on_ok()

            # here we remove the output files of the task and of its parents.
            if self.gc is not None and self.gc.policy == "task":
                self.clean_output_files()

        self.send_signal(self.S_OK)

    return status
def check_status(self):
    """
    This function checks the status of the task by inspecting the output and the
    error files produced by the application and by the queue manager.

    Returns:
        The new status, as set by ``set_status``.
    """
    # 1) see if the job is blocked
    # 2) see if an error occured at submitting the job, TODO these problems can be solved
    # 3) see if there is output
    # 4) see if abinit reports problems
    # 5) see if both err files exist and are empty
    # 6) no output and no err files, the job must still be running
    # 7) try to find out what caused the problems
    # 8) there is a problem but we did not figure out what ...
    # 9) the only way of landing here is if there is a output file but no err files...

    # 1) A locked task can only be unlocked by calling set_status explicitly.
    # an errored task, should not end up here but just to be sure
    black_list = (self.S_LOCKED, self.S_ERROR)
    #if self.status in black_list: return self.status

    # 2) Check the returncode of the process (the process of submitting the job) first.
    # this type of problem should also be handled by the scheduler error parser
    if self.returncode != 0:
        # The job was not submitted properly
        return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)

    # If we have an abort file produced by Abinit
    if self.mpiabort_file.exists:
        return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")

    # Analyze the stderr file for Fortran runtime errors.
    # getsize is 0 if the file is empty or it does not exist.
    err_msg = None
    if self.stderr_file.getsize() != 0:
        err_msg = self.stderr_file.read()

    # Analyze the stderr file of the resource manager for runtime errors.
    # TODO: Why are we looking for errors in queue.qerr?
    qerr_info = None
    if self.qerr_file.getsize() != 0:
        qerr_info = self.qerr_file.read()

    # Analyze the stdout file of the resource manager (needed for PBS !)
    qout_info = None
    if self.qout_file.getsize():
        qout_info = self.qout_file.read()

    # Start to check ABINIT status if the output file has been created.
    if self.output_file.exists:
        try:
            report = self.get_event_report()
        except Exception as exc:
            msg = "%s exception while parsing event_report:\n%s" % (self, exc)
            return self.set_status(self.S_ABICRITICAL, msg=msg)

        if report is None:
            return self.set_status(self.S_ERROR, msg="got None report!")

        if report.run_completed:
            # Here we set the correct timing data reported by Abinit
            self.datetimes.start = report.start_datetime
            self.datetimes.end = report.end_datetime

            # Check if the calculation converged.
            not_ok = report.filter_types(self.CRITICAL_EVENTS)
            if not_ok:
                return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
            else:
                return self.set_status(self.S_OK, msg="status set to ok based on abiout")

        # Calculation still running or errors?
        if report.errors:
            # Abinit reported problems
            logger.debug('Found errors in report')
            for error in report.errors:
                logger.debug(str(error))
                try:
                    self.abi_errors.append(error)
                except AttributeError:
                    self.abi_errors = [error]

            # The job is unfixable due to ABINIT errors
            logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
            msg = "\n".join(map(repr, report.errors))
            return self.set_status(self.S_ABICRITICAL, msg=msg)

        # 5) Output exists and both error files are empty: job still seems to be running.
        if self.stderr_file.exists and not err_msg:
            if self.qerr_file.exists and not qerr_info:
                return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')

    # 6)
    if not self.output_file.exists:
        logger.debug("output_file does not exists")
        if not self.stderr_file.exists and not self.qerr_file.exists:
            # No output at all. The job is still in the queue.
            return self.status

    # 7) Analyze the files of the resource manager and abinit and execution err (mvs)
    if qerr_info or qout_info:
        from pymatgen.io.abinit.scheduler_error_parsers import get_parser
        scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
                                      out_file=self.qout_file.path, run_err_file=self.stderr_file.path)

        if scheduler_parser is None:
            return self.set_status(self.S_QCRITICAL,
                                   msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)

        scheduler_parser.parse()

        if scheduler_parser.errors:
            # Store the queue errors in the task.
            self.queue_errors = scheduler_parser.errors
            msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
            # The job is killed or crashed and we know what happened
            return self.set_status(self.S_QCRITICAL, msg=msg)

        elif lennone(qerr_info) > 0:
            # if only qout_info, we are not necessarily in QCRITICAL state,
            # since there will always be info in the qout file
            msg = 'found unknown messages in the queue error: %s' % str(qerr_info)
            # BUG FIX: the original code called ``logger.history.info(msg)`` but the
            # module-level logger has no `history` attribute, so reaching this branch
            # raised AttributeError. Record the message in the task history instead.
            self.history.info(msg)
            print(msg)
            # Give up if the runtime exceeds the walltime granted by the resource manager.
            # BUG FIX: use total_seconds() instead of .seconds; the latter ignores whole
            # days, so the walltime check never fired for jobs running longer than 24h.
            rt = self.datetimes.get_runtime().total_seconds()
            tl = self.manager.qadapter.timelimit
            if rt > tl:
                msg += 'set to error : runtime (%s) exceded walltime (%s)' % (rt, tl)
                print(msg)
                return self.set_status(self.S_ERROR, msg=msg)
            # The job may be killed or crashed but we don't know what happened
            # It may also be that an innocent message was written to qerr, so we wait for a while
            # it is set to QCritical, we will attempt to fix it by running on more resources

    # 8) analyzing the err files and abinit output did not identify a problem
    # but if the files are not empty we do have a problem but no way of solving it:
    if lennone(err_msg) > 0:
        msg = 'found error message:\n %s' % str(err_msg)
        # The job is killed or crashed but we don't know what happened;
        # it is set to QCritical, we will attempt to fix it by running on more resources
        return self.set_status(self.S_QCRITICAL, msg=msg)

    # 9) if we still haven't returned there is no indication of any error and the job can only still be running
    # but we should actually never land here, or we have delays in the file system ....

    # Check time of last modification.
    if self.output_file.exists and \
       (time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
        msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
        return self.set_status(self.S_ERROR, msg=msg)

    return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
def reduce_memory_demand(self):
    """
    Method that can be called by the scheduler to decrease the memory demand of a specific task.
    Returns True in case of success, False in case of Failure.
    Should be overwritten by specific tasks.
    """
    # Base class has no memory-reduction strategy.
    return False
def speed_up(self):
    """
    Method that can be called by the flow to decrease the time needed for a specific task.
    Returns True in case of success, False in case of Failure
    Should be overwritten by specific tasks.
    """
    # Base class has no speed-up strategy.
    return False
def out_to_in(self, out_file):
    """
    Move an output file to the input data directory of the `Task`
    and rename the file so that ABINIT will read it as an input data file.

    Returns:
        The absolute path of the new file in the indata directory.
    """
    # Replace only the first occurrence of "out" in the basename.
    in_file = os.path.basename(out_file).replace("out", "in", 1)
    dest = os.path.join(self.indir.path, in_file)

    if os.path.exists(dest) and not os.path.islink(dest):
        logger.warning("Will overwrite %s with %s" % (dest, out_file))

    os.rename(out_file, dest)
    return dest
def inlink_file(self, filepath):
    """
    Create a symbolic link to the specified file in the
    directory containing the input files of the task.
    """
    if not os.path.exists(filepath):
        # The link is created anyway; the target may appear later.
        logger.debug("Creating symbolic link to not existent file %s" % filepath)

    # Extract the Abinit extension and add the prefix for input files.
    root, abiext = abi_splitext(filepath)

    infile = "in_" + abiext
    infile = self.indir.path_in(infile)

    # Link path to dest if dest link does not exist.
    # else check that it points to the expected file.
    self.history.info("Linking path %s --> %s" % (filepath, infile))

    if not os.path.exists(infile):
        os.symlink(filepath, infile)
    else:
        if os.path.realpath(infile) != filepath:
            raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
def make_links(self):
    """
    Create symbolic links to the output files produced by the other tasks.

    .. warning::

        This method should be called only when the calculation is READY because
        it uses a heuristic approach to find the file to link.
    """
    for dep in self.deps:
        filepaths, exts = dep.get_filepaths_and_exts()

        for path, ext in zip(filepaths, exts):
            logger.info("Need path %s with ext %s" % (path, ext))
            dest = self.ipath_from_ext(ext)

            if not os.path.exists(path):
                # Try netcdf file. TODO: this case should be treated in a cleaner way.
                path += ".nc"
                if os.path.exists(path): dest += ".nc"

            if not os.path.exists(path):
                raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))

            # Link path to dest if dest link does not exist.
            # else check that it points to the expected file.
            logger.debug("Linking path %s --> %s" % (path, dest))

            if not os.path.exists(dest):
                os.symlink(path, dest)
            else:
                # check links but only if we haven't performed the restart.
                # in this case, indeed we may have replaced the file pointer with the
                # previous output file of the present task.
                if os.path.realpath(dest) != path and self.num_restarts == 0:
                    raise self.Error("dest %s does not point to path %s" % (dest, path))
@abc.abstractmethod
def setup(self):
    """Public method called before submitting the task."""

def _setup(self):
    """
    This method calls self.setup after having performed additional operations
    such as the creation of the symbolic links needed to connect different tasks.
    """
    self.make_links()
    self.setup()
def get_event_report(self, source="log"):
    """
    Analyzes the main logfile of the calculation for possible Errors or Warnings.
    If the ABINIT abort file is found, the error found in this file are added to
    the output report.

    Args:
        source: "output" for the main output file,"log" for the log file.

    Returns:
        :class:`EventReport` instance or None if the source file file does not exist.
    """
    # By default, we inspect the main log file.
    ofile = {
        "output": self.output_file,
        "log": self.log_file}[source]

    parser = events.EventsParser()

    if not ofile.exists:
        if not self.mpiabort_file.exists:
            return None
        else:
            # ABINIT abort file without log!
            abort_report = parser.parse(self.mpiabort_file.path)
            return abort_report

    try:
        report = parser.parse(ofile.path)
        #self._prev_reports[source] = report

        # Add events found in the ABI_MPIABORTFILE.
        if self.mpiabort_file.exists:
            logger.critical("Found ABI_MPIABORTFILE!!!!!")
            abort_report = parser.parse(self.mpiabort_file.path)
            if len(abort_report) != 1:
                logger.critical("Found more than one event in ABI_MPIABORTFILE")

            # Weird case: empty abort file, let's skip the part
            # below and hope that the log file contains the error message.
            #if not len(abort_report): return report

            # Add it to the initial report only if it differs
            # from the last one found in the main log file.
            # NOTE(review): both branches below append last_abort_event, so the
            # "only if it differs" guard currently has no effect — confirm intent.
            last_abort_event = abort_report[-1]
            if report and last_abort_event != report[-1]:
                report.append(last_abort_event)
            else:
                report.append(last_abort_event)

        return report

    #except parser.Error as exc:
    except Exception as exc:
        # Return a report with an error entry with info on the exception.
        msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
        self.set_status(self.S_ABICRITICAL, msg=msg)
        return parser.report_exception(ofile.path, exc)
def get_results(self, **kwargs):
    """
    Returns :class:`NodeResults` instance.
    Subclasses should extend this method (if needed) by adding
    specialized code that performs some kind of post-processing.

    Raises:
        Error: if the process has not terminated or the task is not completed.
    """
    # Check whether the process completed.
    if self.returncode is None:
        # BUG FIX: corrected typo in the error message ("communitate" -> "communicate").
        raise self.Error("return code is None, you should call wait, communicate or poll")

    if self.status is None or self.status < self.S_DONE:
        raise self.Error("Task is not completed")

    return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
    """
    Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
    The destination path must not already exist. If the destination already exists
    but is not a directory, it may be overwritten depending on os.rename() semantics.

    By default, dest is located in the parent directory of self.workdir.
    Use is_abspath=True to specify an absolute path.
    """
    target = dest if is_abspath else os.path.join(os.path.dirname(self.workdir), dest)
    shutil.move(self.workdir, target)
def in_files(self):
    """Return all the input data files used."""
    return self.indir.list_filepaths()

def out_files(self):
    """Return all the output data files produced."""
    return self.outdir.list_filepaths()

def tmp_files(self):
    """Return all the temporary data files produced."""
    return self.tmpdir.list_filepaths()

def path_in_workdir(self, filename):
    """Create the absolute path of filename in the top-level working directory."""
    return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
#@check_spectator
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by |.
Files matching one of the regular expressions will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
    def clean_output_files(self, follow_parents=True):
        """
        This method is called when the task reaches S_OK. It removes all the output files
        produced by the task that are not needed by its children as well as the output files
        produced by its parents if no other node needs them.

        Args:
            follow_parents: If true, the output files of the parents nodes will be removed if possible.

        Return:
            list with the absolute paths of the files that have been removed.
        """
        paths = []
        if self.status != self.S_OK:
            logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
        # Remove all files in tmpdir.
        self.tmpdir.clean()
        # Find the file extensions that should be preserved since these files are still
        # needed by the children who haven't reached S_OK
        except_exts = set()
        for child in self.get_children():
            if child.status == self.S_OK: continue
            # Find the position of self in child.deps and add the extensions.
            i = [dep.node for dep in child.deps].index(self)
            except_exts.update(child.deps[i].exts)
        # Remove the files in the outdir of the task but keep except_exts.
        # NOTE(review): self.gc appears to be the garbage-collector policy holding the
        # set of collectable extensions -- confirm against the Node base class.
        exts = self.gc.exts.difference(except_exts)
        #print("Will remove its extensions: ", exts)
        paths += self.outdir.remove_exts(exts)
        if not follow_parents: return paths
        # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
        for parent in self.get_parents():
            # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
            # e.g {"WFK": [node1, node2]}
            ext2nodes = collections.defaultdict(list)
            for child in parent.get_children():
                if child.status == child.S_OK: continue
                i = [d.node for d in child.deps].index(parent)
                for ext in child.deps[i].exts:
                    ext2nodes[ext].append(child)
            # Remove extension only if no node depends on it!
            except_exts = [k for k, lst in ext2nodes.items() if lst]
            exts = self.gc.exts.difference(except_exts)
            #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
            paths += parent.outdir.remove_exts(exts)
        self.history.info("Removed files: %s" % paths)
        return paths
    def setup(self):
        """Base class does not provide any hook. Subclasses may override to perform pre-run setup."""
    #@check_spectator
    def start(self, **kwargs):
        """
        Starts the calculation by performing the following steps:

        - build dirs and files
        - call the _setup method
        - execute the job file by executing/submitting the job script.

        Main entry point for the `Launcher`.

        ============== ==============================================================
        kwargs         Meaning
        ============== ==============================================================
        autoparal      False to skip the autoparal step (default True)
        exec_args      List of arguments passed to executable.
        ============== ==============================================================

        Returns:
            1 if task was started, 0 otherwise.
        """
        # Refuse to start a task that has already been submitted (or beyond).
        if self.status >= self.S_SUB:
            raise self.Error("Task status: %s" % str(self.status))
        # The lock file prevents two concurrent starts of the same task.
        if self.start_lockfile.exists:
            self.history.warning("Found lock file: %s" % self.start_lockfile.path)
            return 0
        self.start_lockfile.write("Started on %s" % time.asctime())
        self.build()
        self._setup()
        # Add the variables needed to connect the node.
        for d in self.deps:
            cvars = d.connecting_vars()
            self.history.info("Adding connecting vars %s" % cvars)
            self.set_vars(cvars)
            # Get (python) data from other nodes
            d.apply_getters(self)
        # Automatic parallelization
        if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
            try:
                self.autoparal_run()
            except QueueAdapterError as exc:
                # If autoparal cannot find a qadapter to run the calculation raises an Exception
                self.history.critical(exc)
                msg = "Error trying to find a running configuration:\n%s" % straceback()
                self.set_status(self.S_QCRITICAL, msg=msg)
                return 0
            except Exception as exc:
                # Sometimes autoparal_run fails because Abinit aborts
                # at the level of the parser e.g. cannot find the spacegroup
                # due to some numerical noise in the structure.
                # In this case we call fix_abicritical and then we try to run autoparal again.
                self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
                msg = "autoparal_fake_run raised:\n%s" % straceback()
                logger.critical(msg)
                fixed = self.fix_abicritical()
                if not fixed:
                    self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
                    return 0
                try:
                    self.autoparal_run()
                    self.history.info("Second call to autoparal succeeded!")
                except Exception as exc:
                    self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
                    msg = "Tried autoparal again but got:\n%s" % straceback()
                    # logger.critical(msg)
                    self.set_status(self.S_ABICRITICAL, msg=msg)
                    return 0
        # Start the calculation in a subprocess and return.
        self._process = self.manager.launch(self, **kwargs)
        return 1
def start_and_wait(self, *args, **kwargs):
"""
Helper method to start the task and wait for completetion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
class DecreaseDemandsError(Exception):
    """
    Raised by a task when a request to decrease one of its demands
    (load or memory) could not be fulfilled.
    """
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
    @classmethod
    def from_input(cls, input, workdir=None, manager=None):
        """
        Create an instance of `AbinitTask` from an ABINIT input.

        Args:
            input: `AbinitInput` object.
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
        """
        return cls(input, workdir=workdir, manager=manager)
@classmethod
def temp_shell_task(cls, inp, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
task.set_name('temp_shell_task')
return task
    def setup(self):
        """
        Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
        to the output file, and this breaks a lot of code that relies of the use of a unique file extension.
        Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
        exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
        """
        def rename_file(afile):
            """Helper function to rename :class:`File` objects. Return string for logging purpose."""
            # Find the index of the last file (if any).
            # TODO: Maybe it's better to use run.abo --> run(1).abo
            fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
            # Extract the numeric suffix of names shaped like "<basename>_<N>".
            nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
            last = max(nums) if nums else 0
            new_path = afile.path + "_" + str(last+1)
            os.rename(afile.path, new_path)
            return "Will rename %s to %s" % (afile.path, new_path)
        # Rename pre-existing output and log files so Abinit starts clean.
        logs = []
        if self.output_file.exists: logs.append(rename_file(self.output_file))
        if self.log_file.exists: logs.append(rename_file(self.log_file))
        if logs:
            self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
    @property
    def pseudos(self):
        """List of pseudos used in the calculation (delegated to the input)."""
        return self.input.pseudos
    @property
    def isnc(self):
        """True if norm-conserving calculation."""
        return self.input.isnc
    @property
    def ispaw(self):
        """True if PAW calculation."""
        return self.input.ispaw
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
def set_pconfs(self, pconfs):
"""Set the list of autoparal configurations."""
self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
def uses_paral_kgb(self, value=1):
"""True if the task is a GS Task and uses paral_kgb with the given value."""
paral_kgb = self.get_inpvar("paral_kgb", 0)
# paral_kgb is used only in the GS part.
return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
    def autoparal_run(self):
        """
        Find an optimal set of parameters for the execution of the task
        This method can change the ABINIT input variables and/or the
        submission parameters e.g. the number of CPUs for MPI and OpenMp.

        Set:
           self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
           autoparal and optimal is the optimal configuration selected.

        Returns 0 if success
        """
        policy = self.manager.policy
        if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
            logger.info("Nothing to do in autoparal, returning (None, None)")
            return 0
        if policy.autoparal != 1:
            raise NotImplementedError("autoparal != 1")
        ############################################################################
        # Run ABINIT in sequential to get the possible configurations with max_ncpus
        ############################################################################
        # Set the variables for automatic parallelization
        # Will get all the possible configurations up to max_ncpus
        # Return immediately if max_ncpus == 1
        max_ncpus = self.manager.max_cores
        if max_ncpus == 1: return 0
        autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
        self.set_vars(autoparal_vars)
        # Run the job in a shell subprocess with mpi_procs = 1
        # we don't want to make a request to the queue manager for this simple job!
        # Return code is always != 0
        process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
        # NOTE(review): history.pop presumably drops the history entry recorded by
        # launch() so the fake run does not pollute the task history -- confirm.
        self.history.pop()
        retcode = process.wait()
        # Remove the variables added for the automatic parallelization
        self.input.remove_vars(autoparal_vars.keys())
        ##############################################################
        # Parse the autoparal configurations from the main output file
        ##############################################################
        parser = ParalHintsParser()
        try:
            pconfs = parser.parse(self.output_file.path)
        except parser.Error:
            logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
            return 2
        ######################################################
        # Select the optimal configuration according to policy
        ######################################################
        optconf = self.find_optconf(pconfs)
        ####################################################
        # Change the input file and/or the submission script
        ####################################################
        self.set_vars(optconf.vars)
        # Write autoparal configurations to JSON file.
        d = pconfs.as_dict()
        d["optimal_conf"] = optconf
        json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
        ##############
        # Finalization
        ##############
        # Reset the status, remove garbage files ...
        self.set_status(self.S_INIT, msg='finished autoparallel run')
        # Remove the output file since Abinit likes to create new files
        # with extension .outA, .outB if the file already exists.
        os.remove(self.output_file.path)
        os.remove(self.log_file.path)
        os.remove(self.stderr_file.path)
        return 0
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
    def restart(self):
        """
        Generic restart, used when scheduler problems have been taken care of.
        Delegates to ``_restart``; subclasses override this to move restart files in place.
        """
        return self._restart()
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch, this is to be used if a job is restarted with more resources after a crash
Move output files produced in workdir to _reset otherwise check_status continues
to see the task as crashed even if the job did not run
"""
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
    #@check_spectator
    def fix_abicritical(self):
        """
        Method to fix crashes/errors caused by ABINIT by dispatching the events
        found in the report to the registered event handlers.

        Returns:
            1 if task has been fixed else 0.
        """
        event_handlers = self.event_handlers
        if not event_handlers:
            self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
            return 0
        # done[i] counts how many events handler i has already fixed.
        count, done = 0, len(event_handlers) * [0]
        report = self.get_event_report()
        if report is None:
            self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
            return 0
        # Note we have loop over all possible events (slow, I know)
        # because we can have handlers for Error, Bug or Warning
        # (ideally only for CriticalWarnings but this is not done yet)
        for event in report:
            for i, handler in enumerate(self.event_handlers):
                if handler.can_handle(event) and not done[i]:
                    logger.info("handler %s will try to fix event %s" % (handler, event))
                    try:
                        d = handler.handle_task_event(self, event)
                        if d:
                            done[i] += 1
                            count += 1
                    except Exception as exc:
                        # A failing handler must not abort the whole fixing loop.
                        logger.critical(str(exc))
        if count:
            # At least one event was fixed: wipe the run products and restart.
            self.reset_from_scratch()
            return 1
        self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
        return 0
    #@check_spectator
    def fix_queue_critical(self):
        """
        This function tries to fix critical events originating from the queue submission system.

        General strategy, first try to increase resources in order to fix the problem,
        if this is not possible, call a task specific method to attempt to decrease the demands.

        Returns:
            1 if task has been fixed else 0.

        NOTE(review): the actual return values do not match this contract: the
        no-queue-errors path returns the string ``ret``, several recovery paths
        return None, and the fall-through returns 0 -- confirm callers only test
        truthiness before tightening this.
        """
        from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
        #assert isinstance(self.manager, TaskManager)
        self.history.info('fixing queue critical')
        ret = "task.fix_queue_critical: "
        if not self.queue_errors:
            # TODO
            # paral_kgb = 1 leads to nasty sigegv that are seen as Qcritical errors!
            # Try to fallback to the conjugate gradient.
            #if self.uses_paral_kgb(1):
            #    logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
            #    self.set_vars(paral_kgb=0)
            #    self.reset_from_scratch()
            #    return
            # queue error but no errors detected, try to solve by increasing ncpus if the task scales
            # if resources are at maximum the task is definitively turned to errored
            if self.mem_scales or self.load_scales:
                try:
                    self.manager.increase_resources()  # acts either on the policy or on the qadapter
                    self.reset_from_scratch()
                    ret += "increased resources"
                    return ret
                except ManagerIncreaseError:
                    self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
                    raise FixQueueCriticalError
            else:
                self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
                raise FixQueueCriticalError
        else:
            print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
            print("type_list: %s" % list(type(qe) for qe in self.queue_errors))
            for error in self.queue_errors:
                self.history.info('fixing: %s' % str(error))
                ret += str(error)
                if isinstance(error, NodeFailureError):
                    # if the problematic node is known, exclude it
                    if error.nodes is not None:
                        try:
                            self.manager.exclude_nodes(error.nodes)
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='excluding nodes')
                        # NOTE(review): bare except hides the real failure mode of
                        # exclude_nodes/reset -- consider narrowing to Exception.
                        except:
                            raise FixQueueCriticalError
                    else:
                        self.set_status(self.S_ERROR, msg='Node error but no node identified.')
                        raise FixQueueCriticalError
                elif isinstance(error, MemoryCancelError):
                    # ask the qadapter to provide more resources, i.e. more cpu's so more total memory if the code
                    # scales this should fix the memory problem
                    # increase both max and min ncpu of the autoparalel and rerun autoparalel
                    if self.mem_scales:
                        try:
                            self.manager.increase_ncpus()
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased ncps to solve memory problem')
                            return
                        except ManagerIncreaseError:
                            self.history.warning('increasing ncpus failed')
                    # if the max is reached, try to increase the memory per cpu:
                    try:
                        self.manager.increase_mem()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased mem')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increasing mem failed')
                    # if this failed ask the task to provide a method to reduce the memory demand
                    try:
                        self.reduce_memory_demand()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='decreased mem demand')
                        return
                    except DecreaseDemandsError:
                        self.history.warning('decreasing demands failed')
                    msg = ('Memory error detected but the memory could not be increased neigther could the\n'
                           'memory demand be decreased. Unrecoverable error.')
                    self.set_status(self.S_ERROR, msg)
                    raise FixQueueCriticalError
                elif isinstance(error, TimeCancelError):
                    # ask the qadapter to provide more time
                    print('trying to increase time')
                    try:
                        self.manager.increase_time()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased wall time')
                        return
                    except ManagerIncreaseError:
                        self.history.warning('increasing the waltime failed')
                    # if this fails ask the qadapter to increase the number of cpus
                    if self.load_scales:
                        try:
                            self.manager.increase_ncpus()
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased number of cpus')
                            return
                        except ManagerIncreaseError:
                            self.history.warning('increase ncpus to speed up the calculation to stay in the walltime failed')
                    # if this failed ask the task to provide a method to speed up the task
                    try:
                        self.speed_up()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='task speedup')
                        return
                    except DecreaseDemandsError:
                        self.history.warning('decreasing demands failed')
                    msg = ('Time cancel error detected but the time could not be increased neither could\n'
                           'the time demand be decreased by speedup of increasing the number of cpus.\n'
                           'Unrecoverable error.')
                    self.set_status(self.S_ERROR, msg)
                else:
                    msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
                    self.set_status(self.S_ERROR, msg)
        return 0
def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None
class ProduceHist(object):
    """
    Mixin class for an :class:`AbinitTask` producing a HIST file.
    Provide the method `open_hist` that reads and return a HIST file.
    """
    @property
    def hist_path(self):
        """Absolute path of the HIST file. Empty string if file is not present."""
        # Lazy lookup: cache the path once a non-empty result is found
        # (an empty result is deliberately not cached so we retry later).
        if not hasattr(self, "_hist_path"):
            path = self.outdir.has_abiext("HIST")
            if not path:
                return path
            self._hist_path = path
        return self._hist_path

    def open_hist(self):
        """
        Open the HIST file located in the in self.outdir.
        Returns :class:`HistFile` object, None if file could not be found or file is not readable.
        """
        if not self.hist_path:
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
            return None
        # Open the HIST file
        from abipy.dynamics.hist import HistFile
        try:
            return HistFile(self.hist_path)
        except Exception as exc:
            logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
            return None
class GsTask(AbinitTask):
    """
    Base class for ground-state tasks. A ground state task produces a GSR file
    Provides the method `open_gsr` that reads and returns a GSR file.
    """
    @property
    def gsr_path(self):
        """Absolute path of the GSR file. Empty string if file is not present."""
        # Lazy lookup: cache the path once a non-empty result is found
        # (an empty result is deliberately not cached so we retry later).
        if not hasattr(self, "_gsr_path"):
            path = self.outdir.has_abiext("GSR")
            if not path:
                return path
            self._gsr_path = path
        return self._gsr_path

    def open_gsr(self):
        """
        Open the GSR file located in the in self.outdir.
        Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
        """
        gsr_path = self.gsr_path
        if not gsr_path:
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
            return None
        # Open the GSR file.
        from abipy.electrons.gsr import GsrFile
        try:
            return GsrFile(gsr_path)
        except Exception as exc:
            logger.critical("Exception while reading GSR file at %s:\n%s" % (gsr_path, str(exc)))
            return None
class ScfTask(GsTask):
    """
    Self-consistent ground-state calculations.
    Provide support for in-place restart via (WFK|DEN) files
    """
    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]
    color_rgb = np.array((255, 0, 0)) / 255

    def restart(self):
        """SCF calculations can be restarted if we have either the WFK file or the DEN file."""
        # Prefer WFK over DEN files since we can reuse the wavefunctions.
        restart_file = None
        for ext in ("WFK", "DEN"):
            candidate = self.outdir.has_abiext(ext)
            if candidate:
                restart_file, irdvars = candidate, irdvars_for_ext(ext)
                break
        if restart_file is None:
            raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)
        # Move out --> in.
        self.out_to_in(restart_file)
        # Add the appropriate variable for restarting.
        self.set_vars(irdvars)
        # Now we can resubmit the job.
        self.history.info("Will restart from %s", restart_file)
        return self._restart()

    def inspect(self, **kwargs):
        """
        Plot the SCF cycle results with matplotlib.

        Returns:
            `matplotlib` figure, None if some error occurred.
        """
        try:
            cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
        except IOError:
            return None
        if cycle is None:
            return None
        kwargs.setdefault("title", str(self))
        return cycle.plot(**kwargs)

    def get_results(self, **kwargs):
        results = super(ScfTask, self).get_results(**kwargs)
        # Open the GSR file and add its data to results.out
        with self.open_gsr() as gsr:
            results["out"].update(gsr.as_dict())
            # Add files to GridFS
            results.register_gridfs_files(GSR=gsr.filepath)
        return results
class CollinearThenNonCollinearScfTask(ScfTask):
    """
    A specialized ScfTask that performs an initial SCF run with nsppol = 2.
    The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
    initialized from the previous WFK file.
    """
    def __init__(self, input, workdir=None, manager=None, deps=None):
        super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
        # Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
        # NOTE(review): assumes self.input returns self._input, so the deepcopy
        # decouples this task's input from the caller's object before the
        # in-place modifications below -- confirm against the input property.
        self._input = self.input.deepcopy()
        self.input.set_spin_mode("polarized")
        self.input.set_vars(prtwf=1)
        # Set to True once the first (collinear) run has completed.
        self.collinear_done = False
    def _on_ok(self):
        results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
        if not self.collinear_done:
            # Switch to the non-collinear spin mode and restart from the WFK file.
            self.input.set_spin_mode("spinor")
            self.collinear_done = True
            self.finalized = False
            self.restart()
        return results
class NscfTask(GsTask):
    """
    Non-Self-consistent GS calculation. Provide in-place restart via WFK files
    """
    CRITICAL_EVENTS = [
        events.NscfConvergenceWarning,
    ]
    color_rgb = np.array((255, 122, 122)) / 255

    def restart(self):
        """NSCF calculations can be restarted only if we have the WFK file."""
        wfk_file = self.outdir.has_abiext("WFK")
        if not wfk_file:
            raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)
        # Move out --> in.
        self.out_to_in(wfk_file)
        # Tell Abinit to read the incoming WFK file.
        self.set_vars(irdvars_for_ext("WFK"))
        # Now we can resubmit the job.
        self.history.info("Will restart from %s", wfk_file)
        return self._restart()

    def get_results(self, **kwargs):
        results = super(NscfTask, self).get_results(**kwargs)
        # Read the GSR file and merge its data into results.out
        with self.open_gsr() as gsr:
            results["out"].update(gsr.as_dict())
            # Add files to GridFS
            results.register_gridfs_files(GSR=gsr.filepath)
        return results
class RelaxTask(GsTask, ProduceHist):
"""
Task for structural optimizations.
"""
# TODO possible ScfConvergenceWarning?
CRITICAL_EVENTS = [
events.RelaxConvergenceWarning,
]
color_rgb = np.array((255, 61, 255)) / 255
    def get_final_structure(self):
        """Read the final structure from the GSR file."""
        try:
            with self.open_gsr() as gsr:
                return gsr.structure
        except AttributeError:
            # open_gsr returns None when no GSR file is found; entering the
            # with-statement then raises AttributeError (None has no __enter__).
            raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")
    def restart(self):
        """
        Restart the structural relaxation.

        Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file
        from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
        Prefer WFK over other files since we can reuse the wavefunctions.

        .. note::

            The problem in the present approach is that some parameters in the input
            are computed from the initial structure and may not be consistent with
            the modification of the structure done during the structure relaxation.
        """
        restart_file = None
        # Try to restart from the WFK file if possible.
        # FIXME: This part has been disabled because WFK=IO is a mess if paral_kgb == 1
        # This is also the reason why I wrote my own MPI-IO code for the GW part!
        wfk_file = self.outdir.has_abiext("WFK")
        if False and wfk_file:
            irdvars = irdvars_for_ext("WFK")
            restart_file = self.out_to_in(wfk_file)
        # Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
        # This happens when the previous run completed and task.on_done has been performed.
        # ********************************************************************************
        # Note that it's possible to have an undetected error if we have multiple restarts
        # and the last relax died badly. In this case indeed out_DEN is the file produced
        # by the last run that has executed on_done.
        # ********************************************************************************
        if restart_file is None:
            out_den = self.outdir.path_in("out_DEN")
            if os.path.exists(out_den):
                irdvars = irdvars_for_ext("DEN")
                restart_file = self.out_to_in(out_den)
        if restart_file is None:
            # Try to restart from the last TIM?_DEN file.
            # This should happen if the previous run didn't complete in clean way.
            # Find the last TIM?_DEN file.
            last_timden = self.outdir.find_last_timden_file()
            if last_timden is not None:
                ofile = self.outdir.path_in("out_DEN")
                os.rename(last_timden.path, ofile)
                restart_file = self.out_to_in(ofile)
                irdvars = irdvars_for_ext("DEN")
        if restart_file is None:
            # Don't raise RestartError as we can still change the structure.
            self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
        else:
            # Add the appropriate variable for restarting.
            self.set_vars(irdvars)
            self.history.info("Will restart from %s", restart_file)
        # FIXME Here we should read the HIST file but restartxf if broken!
        #self.set_vars({"restartxf": -1})
        # Read the relaxed structure from the GSR file and change the input.
        self._change_structure(self.get_final_structure())
        # Now we can resubmit the job.
        return self._restart()
def inspect(self, **kwargs):
"""
Plot the evolution of the structural relaxation with matplotlib.
Args:
what: Either "hist" or "scf". The first option (default) extracts data
from the HIST file and plot the evolution of the structural
parameters, forces, pressures and energies.
The second option, extracts data from the main output file and
plot the evolution of the SCF cycles (etotal, residuals, etc).
Returns:
`matplotlib` figure, None if some error occurred.
"""
what = kwargs.pop("what", "hist")
if what == "hist":
# Read the hist file to get access to the structure.
with self.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
elif what == "scf":
# Get info on the different SCF cycles
relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
if "title" not in kwargs: kwargs["title"] = str(self)
return relaxation.plot(**kwargs) if relaxation is not None else None
else:
raise ValueError("Wrong value for what %s" % what)
def get_results(self, **kwargs):
results = super(RelaxTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
def reduce_dilatmx(self, target=1.01):
actual_dilatmx = self.get_inpvar('dilatmx', 1.)
new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
self.set_vars(dilatmx=new_dilatmx)
def fix_ofiles(self):
"""
Note that ABINIT produces lots of out_TIM1_DEN files for each step.
Here we list all TIM*_DEN files, we select the last one and we rename it in out_DEN
This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
without having to know the number of iterations needed to converge the run in node!
"""
super(RelaxTask, self).fix_ofiles()
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is None:
logger.warning("Cannot find TIM?_DEN files")
return
# Rename last TIMDEN with out_DEN.
ofile = self.outdir.path_in("out_DEN")
self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
os.rename(last_timden.path, ofile)
class DfptTask(AbinitTask):
    """
    Base class for DFPT tasks (Phonons, ...)
    Mainly used to implement methods that are common to DFPT calculations with Abinit.
    Provide the method `open_ddb` that reads and return a Ddb file.
    .. warning::
        This class should not be instantiated directly.
    """
    @property
    def ddb_path(self):
        """Absolute path of the DDB file. Empty string if file is not present."""
        # Cache the directory scan done by has_abiext in an instance attribute.
        if not hasattr(self, "_ddb_path"):
            path = self.outdir.has_abiext("DDB")
            if path: self._ddb_path = path
            return path
        return self._ddb_path
    def open_ddb(self):
        """
        Open the DDB file located in self.outdir.
        Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
        """
        ddb_path = self.ddb_path
        if not ddb_path:
            # A completed DFPT task is expected to have produced a DDB file.
            if self.status == self.S_OK:
                logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
            return None
        from abipy.dfpt.ddb import DdbFile
        try:
            return DdbFile(ddb_path)
        except Exception as exc:
            logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
            return None
# TODO Remove
class DdeTask(DfptTask):
    """Task for DDE calculations."""
    def get_results(self, **kwargs):
        """Return the results, registering the DDE file in GridFS."""
        results = super(DdeTask, self).get_results(**kwargs)
        dde_path = self.outdir.has_abiext("DDE")
        return results.register_gridfs_file(DDB=(dde_path, "t"))
class DdkTask(DfptTask):
    """Task for DDK calculations."""
    # RGB color (normalized to [0, 1]) associated with this task class.
    color_rgb = np.array([61, 158, 255]) / 255
    #@check_spectator
    def _on_ok(self):
        """Expose the 1WF output under the DDK extension once the task completes."""
        super(DdkTask, self)._on_ok()
        # Symlink instead of renaming/removing, otherwise the optic tests fail;
        # fixing this properly requires a rationalization of file extensions.
        self.outdir.symlink_abiext('1WF', 'DDK')
    def get_results(self, **kwargs):
        """Return the results, registering the DDK file in GridFS."""
        results = super(DdkTask, self).get_results(**kwargs)
        ddk_path = self.outdir.has_abiext("DDK")
        return results.register_gridfs_file(DDK=(ddk_path, "t"))
class BecTask(DfptTask):
    """
    Task for the calculation of Born effective charges.
    Typical dependencies::
        bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
        bec_deps.update({scf_task: "WFK"})
    """
    color_rgb = np.array((122, 122, 255)) / 255
    def make_links(self):
        """
        Replace the default behaviour of make_links.
        Abinit expects the DDK files under the fortran name ``in_1WF{case}``
        and the ground-state wavefunctions as ``in_WFK``, so the symlinks are
        created manually from the task dependencies.
        Raises:
            RuntimeError: if a dependency did not produce the expected file, or
                if a DDK task does not have exactly one direction set in rfdir.
            ValueError: if a dependency carries an unexpected extension.
        """
        #print("In BEC make_links")
        for dep in self.deps:
            if dep.exts == ["DDK"]:
                ddk_task = dep.node
                out_ddk = ddk_task.outdir.has_abiext("DDK")
                if not out_ddk:
                    raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
                # Get (fortran) idir and construct the name of the 1WF expected by Abinit
                rfdir = list(ddk_task.input["rfdir"])
                if rfdir.count(1) != 1:
                    # Fixed typo in the error message ("specifned" --> "specified").
                    raise RuntimeError("Only one direction should be specified in rfdir but rfdir = %s" % rfdir)
                idir = rfdir.index(1) + 1
                ddk_case = idir + 3 * len(ddk_task.input.structure)
                infile = self.indir.path_in("in_1WF%d" % ddk_case)
                os.symlink(out_ddk, infile)
            elif dep.exts == ["WFK"]:
                gs_task = dep.node
                out_wfk = gs_task.outdir.has_abiext("WFK")
                if not out_wfk:
                    raise RuntimeError("%s didn't produce the WFK file" % gs_task)
                os.symlink(out_wfk, self.indir.path_in("in_WFK"))
            else:
                raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
    """
    DFPT calculations for a single atomic perturbation.
    Provide support for in-place restart via (1WF|1DEN) files
    """
    # TODO:
    # for the time being we don't discern between GS and PhononCalculations.
    CRITICAL_EVENTS = [
        events.ScfConvergenceWarning,
    ]
    color_rgb = np.array((0, 0, 255)) / 255
    def restart(self):
        """
        Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file
        from which we can read the first-order wavefunctions or the first-order density.
        Prefer 1WF over 1DEN since we can reuse the wavefunctions.
        Raises:
            RestartError: if neither a unique 1WF nor a unique 1DEN file is found,
                because restarting without them would be equivalent to a run from scratch.
        """
        # Abinit adds the idir-ipert index at the end of the file and this breaks the extension
        # e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files returns the list of files found
        restart_file, irdvars = None, None
        # Highest priority to the 1WF file because restart is more efficient.
        wf_files = self.outdir.find_1wf_files()
        if wf_files is not None:
            restart_file = wf_files[0].path
            irdvars = irdvars_for_ext("1WF")
            # More than one 1WF means we cannot tell which perturbation to restart from.
            if len(wf_files) != 1:
                restart_file = None
                logger.critical("Found more than one 1WF file. Restart is ambiguous!")
        if restart_file is None:
            # Fallback: first-order density.
            den_files = self.outdir.find_1den_files()
            if den_files is not None:
                restart_file = den_files[0].path
                irdvars = {"ird1den": 1}
                if len(den_files) != 1:
                    restart_file = None
                    logger.critical("Found more than one 1DEN file. Restart is ambiguous!")
        if restart_file is None:
            # Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
            raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)
        # Move file.
        self.history.info("Will restart from %s", restart_file)
        restart_file = self.out_to_in(restart_file)
        # Add the appropriate variable for restarting.
        self.set_vars(irdvars)
        # Now we can resubmit the job.
        return self._restart()
    def inspect(self, **kwargs):
        """
        Plot the Phonon SCF cycle results with matplotlib.
        Returns:
            `matplotlib` figure, None if some error occurred.
        """
        scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
        if scf_cycle is not None:
            if "title" not in kwargs: kwargs["title"] = str(self)
            return scf_cycle.plot(**kwargs)
    def get_results(self, **kwargs):
        """Return the results, registering the DDB file in GridFS."""
        results = super(PhononTask, self).get_results(**kwargs)
        return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))
    def make_links(self):
        """Build symbolic links to the input files (default Task behaviour)."""
        super(PhononTask, self).make_links()
        # fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
        #if self.indir.has_abiext('DDK'):
        #    self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
    """
    Task for electron-phonon calculations with ABINIT.
    """
    # RGB color (normalized to [0, 1]) associated with this task class.
    color_rgb = np.array([255, 128, 0]) / 255
class ManyBodyTask(AbinitTask):
    """
    Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
    Mainly used to implement methods that are common to MBPT calculations with Abinit.
    .. warning::
        This class should not be instantiated directly.
    """
    def reduce_memory_demand(self):
        """
        Method that can be called by the scheduler to decrease the memory demand of a specific task.
        Returns True in case of success, False in case of Failure.
        """
        # gwmem is a two-digit flag: the first digit governs the storage of W(q),
        # the second digit the storage of u(r). Drop u(r) first since reading
        # W(q) from file would lead to a dramatic slowdown.
        gwmem = int(self.get_inpvar("gwmem", default=11))
        keep_wq, keep_ur = divmod(gwmem, 10)
        if keep_ur == 1:
            self.set_vars(gwmem="%.2d" % (10 * keep_wq))
            return True
        if keep_wq == 1:
            self.set_vars(gwmem="%.2d" % 00)
            return True
        # gwmem already 00: nothing left to reduce.
        return False
class ScrTask(ManyBodyTask):
    """Tasks for SCREENING calculations """
    color_rgb = np.array([255, 128, 0]) / 255
    #def inspect(self, **kwargs):
    #    """Plot graph showing the number of q-points computed and the wall-time used"""
    @property
    def scr_path(self):
        """Absolute path of the SCR file. Empty string if file is not present."""
        # Cache the directory scan done by has_abiext in an instance attribute.
        if not hasattr(self, "_scr_path"):
            path = self.outdir.has_abiext("SCR.nc")
            if path: self._scr_path = path
            return path
        return self._scr_path
    def open_scr(self):
        """
        Open the SCR.nc file located in self.outdir.
        Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
        """
        scr_path = self.scr_path
        if not scr_path:
            logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
            return None
        from abipy.electrons.scr import ScrFile
        try:
            return ScrFile(scr_path)
        except Exception as exc:
            logger.critical("Exception while reading SCR file at %s:\n%s" % (scr_path, str(exc)))
            return None
class SigmaTask(ManyBodyTask):
    """
    Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
    """
    CRITICAL_EVENTS = [
        events.QPSConvergenceWarning,
    ]
    color_rgb = np.array([0, 255, 0]) / 255
    def restart(self):
        """
        Restart the SIGMA run from the QPS file produced by the previous step.
        Raises:
            RestartError: if no QPS file is found in the output directory.
        """
        ext = "QPS"
        qps_file = self.outdir.has_abiext(ext)
        if not qps_file:
            raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)
        self.out_to_in(qps_file)
        # Tell Abinit to read the QPS file from the input directory.
        self.set_vars(irdvars_for_ext(ext))
        self.history.info("Will restart from %s", qps_file)
        return self._restart()
    #def inspect(self, **kwargs):
    #    """Plot graph showing the number of k-points computed and the wall-time used"""
    @property
    def sigres_path(self):
        """Absolute path of the SIGRES file. Empty string if file is not present."""
        # Cache the directory scan done by has_abiext in an instance attribute.
        if not hasattr(self, "_sigres_path"):
            path = self.outdir.has_abiext("SIGRES")
            if path: self._sigres_path = path
            return path
        return self._sigres_path
    def open_sigres(self):
        """
        Open the SIGRES file located in self.outdir.
        Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
        """
        sigres_path = self.sigres_path
        if not sigres_path:
            logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
            return None
        from abipy.electrons.gw import SigresFile
        try:
            return SigresFile(sigres_path)
        except Exception as exc:
            logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
            return None
    def get_scissors_builder(self):
        """
        Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.
        Raise:
            `RuntimeError` if SIGRES file is not found.
        """
        if not self.sigres_path:
            raise RuntimeError("Cannot find SIGRES file!")
        from abipy.electrons.scissors import ScissorsBuilder
        return ScissorsBuilder.from_file(self.sigres_path)
    def get_results(self, **kwargs):
        """Collect task results and attach the SIGRES file to GridFS."""
        results = super(SigmaTask, self).get_results(**kwargs)
        with self.open_sigres() as sigres:
            #results["out"].update(sigres.as_dict())
            results.register_gridfs_files(SIGRES=sigres.filepath)
        return results
class BseTask(ManyBodyTask):
    """
    Task for Bethe-Salpeter calculations.
    .. note::
        The BSE codes provides both iterative and direct schemes for the computation of the dielectric function.
        The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
    """
    CRITICAL_EVENTS = [
        events.HaydockConvergenceWarning,
        #events.BseIterativeDiagoConvergenceWarning,
    ]
    color_rgb = np.array((128, 0, 255)) / 255
    def restart(self):
        """
        BSE calculations with Haydock can be restarted only if we have the
        excitonic Hamiltonian and the HAYDR_SAVE file.
        Raises:
            RestartError: if neither outdir nor indir contains BSR|BSC files,
                or if no HAYDR_SAVE file is found in outdir.
        """
        # TODO: This version seems to work but the main output file is truncated
        # TODO: Handle restart if CG method is used
        # TODO: restart should receive a list of critical events
        # the log file is complete though.
        irdvars = {}
        # Move the BSE blocks to indata.
        # This is done only once at the end of the first run.
        # Successive restarts will use the BSR|BSC files in the indir directory
        # to initialize the excitonic Hamiltonian
        count = 0
        for ext in ("BSR", "BSC"):
            ofile = self.outdir.has_abiext(ext)
            if ofile:
                count += 1
                irdvars.update(irdvars_for_ext(ext))
                self.out_to_in(ofile)
        if not count:
            # outdir does not contain the BSR|BSC file.
            # This means that num_restart > 1 and the files should be in task.indir
            count = 0
            for ext in ("BSR", "BSC"):
                ifile = self.indir.has_abiext(ext)
                if ifile:
                    count += 1
            if not count:
                raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))
        # Rename HAYDR_SAVE files
        count = 0
        for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
            ofile = self.outdir.has_abiext(ext)
            if ofile:
                count += 1
                irdvars.update(irdvars_for_ext(ext))
                self.out_to_in(ofile)
        if not count:
            raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)
        # Add the appropriate variable for restarting.
        self.set_vars(irdvars)
        # Now we can resubmit the job.
        #self.history.info("Will restart from %s", restart_file)
        return self._restart()
    #def inspect(self, **kwargs):
    #    """
    #    Plot the Haydock iterations with matplotlib.
    #
    #    Returns
    #        `matplotlib` figure, None if some error occurred.
    #    """
    #    haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
    #    if haydock_cycle is not None:
    #        if "title" not in kwargs: kwargs["title"] = str(self)
    #        return haydock_cycle.plot(**kwargs)
    @property
    def mdf_path(self):
        """Absolute path of the MDF file. Empty string if file is not present."""
        # Lazy property to avoid multiple calls to has_abiext.
        try:
            return self._mdf_path
        except AttributeError:
            path = self.outdir.has_abiext("MDF.nc")
            if path: self._mdf_path = path
            return path
    def open_mdf(self):
        """
        Open the MDF file located in the in self.outdir.
        Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
        """
        mdf_path = self.mdf_path
        if not mdf_path:
            logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
            return None
        # Open the MDF file and add its data to results.out
        from abipy.electrons.bse import MdfFile
        try:
            return MdfFile(mdf_path)
        except Exception as exc:
            logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
            return None
    def get_results(self, **kwargs):
        """Collect task results and attach the MDF file to GridFS."""
        results = super(BseTask, self).get_results(**kwargs)
        with self.open_mdf() as mdf:
            #results["out"].update(mdf.as_dict())
            #epsilon_infinity optical_gap
            results.register_gridfs_files(MDF=mdf.filepath)
        return results
class OpticTask(Task):
    """
    Task for the computation of optical spectra with optic i.e.
    RPA without local-field effects and velocity operator computed from DDK files.
    """
    color_rgb = np.array((255, 204, 102)) / 255
    def __init__(self, optic_input, nscf_node, ddk_nodes, workdir=None, manager=None):
        """
        Create an instance of :class:`OpticTask` from a string containing the input.
        Args:
            optic_input: string with the optic variables (filepaths will be added at run time).
            nscf_node: The NSCF task that will produce the WFK file or string with the path of the WFK file.
            ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDK paths.
            workdir: Path to the working directory.
            manager: :class:`TaskManager` object.
        """
        # Convert paths to FileNodes
        self.nscf_node = Node.as_node(nscf_node)
        self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
        # Optic requires one DDK file per Cartesian direction.
        assert len(ddk_nodes) == 3
        #print(self.nscf_node, self.ddk_nodes)
        # Use DDK extension instead of 1WF
        deps = {n: "1WF" for n in self.ddk_nodes}
        #deps = {n: "DDK" for n in self.ddk_nodes}
        deps.update({self.nscf_node: "WFK"})
        super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
    def set_workdir(self, workdir, chroot=False):
        """Set the working directory of the task."""
        super(OpticTask, self).set_workdir(workdir, chroot=chroot)
        # Small hack: the log file of optics is actually the main output file.
        self.output_file = self.log_file
    @deprecated(message="_set_inpvars is deprecated. Use set_vars")
    def _set_inpvars(self, *args, **kwargs):
        # Deprecated alias kept for backward compatibility.
        return self.set_vars(*args, **kwargs)
    def set_vars(self, *args, **kwargs):
        """
        Optic does not use `get` or `ird` variables hence we should never try
        to change the input when we connect this task.
        Only autoparal and max_ncpus are propagated to the input.
        """
        kwargs.update(dict(*args))
        self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
        if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
        if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
    @property
    def executable(self):
        """Path to the executable required for running the :class:`OpticTask`."""
        try:
            return self._executable
        except AttributeError:
            return "optic"
    @property
    def filesfile_string(self):
        """String with the list of files and prefixes needed to execute ABINIT."""
        lines = []
        app = lines.append
        #optic.in     ! Name of input file
        #optic.out    ! Unused
        #optic        ! Root name for all files that will be produced
        app(self.input_file.path)                           # Path to the input file
        app(os.path.join(self.workdir, "unused"))           # Path to the output file
        app(os.path.join(self.workdir, self.prefix.odata))  # Prefix for output data
        return "\n".join(lines)
    @property
    def wfk_filepath(self):
        """Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
        return self.nscf_node.outdir.has_abiext("WFK")
    @property
    def ddk_filepaths(self):
        """Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
        return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
    def make_input(self):
        """Construct and write the input file of the calculation."""
        # Set the file paths.
        all_files ={"ddkfile_"+str(n+1) : ddk for n,ddk in enumerate(self.ddk_filepaths)}
        all_files.update({"wfkfile" : self.wfk_filepath})
        files_nml = {"FILES" : all_files}
        files= nmltostring(files_nml)
        # Get the input specified by the user
        user_file = nmltostring(self.input.as_dict())
        # Join them.
        return files + user_file
    def setup(self):
        """Public method called before submitting the task."""
    def make_links(self):
        """
        Optic allows the user to specify the paths of the input file.
        hence we don't need to create symbolic links.
        """
    def get_results(self, **kwargs):
        """Return the results of the task (no extra data registered for optic)."""
        results = super(OpticTask, self).get_results(**kwargs)
        #results.update(
        #"epsilon_infinity":
        #))
        return results
    def fix_abicritical(self):
        """
        Cannot fix abicritical errors for optic
        """
        return 0
    #@check_spectator
    def reset_from_scratch(self):
        """
        restart from scratch, this is to be used if a job is restarted with more resources after a crash
        """
        # Move output files produced in workdir to _reset otherwise check_status continues
        # to see the task as crashed even if the job did not run
        # Create reset directory if not already done.
        reset_dir = os.path.join(self.workdir, "_reset")
        reset_file = os.path.join(reset_dir, "_counter")
        if not os.path.exists(reset_dir):
            os.mkdir(reset_dir)
            num_reset = 1
        else:
            # The counter file stores how many resets have been done so far.
            with open(reset_file, "rt") as fh:
                num_reset = 1 + int(fh.read())
        # Move files to reset and append digit with reset index.
        def move_file(f):
            if not f.exists: return
            try:
                f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
            except OSError as exc:
                logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
        for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
            move_file(getattr(self, fname))
        with open(reset_file, "wt") as fh:
            fh.write(str(num_reset))
        self.start_lockfile.remove()
        # Reset datetimes
        self.datetimes.reset()
        return self._restart(submit=False)
    def fix_queue_critical(self):
        """
        This function tries to fix critical events originating from the queue submission system.
        General strategy, first try to increase resources in order to fix the problem,
        if this is not possible, call a task specific method to attempt to decrease the demands.
        Returns:
            1 if task has been fixed else 0.
        """
        from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
        #assert isinstance(self.manager, TaskManager)
        if not self.queue_errors:
            # No parsed queue error: blindly ask for more resources if the code scales.
            if self.mem_scales or self.load_scales:
                try:
                    self.manager.increase_resources()  # acts either on the policy or on the qadapter
                    self.reset_from_scratch()
                    return
                except ManagerIncreaseError:
                    self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
                    raise FixQueueCriticalError
            else:
                self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
                raise FixQueueCriticalError
        else:
            for error in self.queue_errors:
                logger.info('fixing: %s' % str(error))
                if isinstance(error, NodeFailureError):
                    # if the problematic node is known, exclude it
                    if error.nodes is not None:
                        try:
                            self.manager.exclude_nodes(error.nodes)
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='excluding nodes')
                        except:
                            raise FixQueueCriticalError
                    else:
                        self.set_status(self.S_ERROR, msg='Node error but no node identified.')
                        raise FixQueueCriticalError
                elif isinstance(error, MemoryCancelError):
                    # ask the qadapter to provide more resources, i.e. more cpu's so more total memory if the code
                    # scales this should fix the memory problem
                    # increase both max and min ncpu of the autoparal and rerun autoparal
                    if self.mem_scales:
                        try:
                            self.manager.increase_ncpus()
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased ncps to solve memory problem')
                            return
                        except ManagerIncreaseError:
                            logger.warning('increasing ncpus failed')
                    # if the max is reached, try to increase the memory per cpu:
                    try:
                        self.manager.increase_mem()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased mem')
                        return
                    except ManagerIncreaseError:
                        logger.warning('increasing mem failed')
                    # if this failed ask the task to provide a method to reduce the memory demand
                    try:
                        self.reduce_memory_demand()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='decreased mem demand')
                        return
                    except DecreaseDemandsError:
                        logger.warning('decreasing demands failed')
                    msg = ('Memory error detected but the memory could not be increased neigther could the\n'
                           'memory demand be decreased. Unrecoverable error.')
                    self.set_status(self.S_ERROR, msg)
                    raise FixQueueCriticalError
                elif isinstance(error, TimeCancelError):
                    # ask the qadapter to provide more time
                    try:
                        self.manager.increase_time()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='increased wall time')
                        return
                    except ManagerIncreaseError:
                        logger.warning('increasing the waltime failed')
                    # if this fails ask the qadapter to increase the number of cpus
                    if self.load_scales:
                        try:
                            self.manager.increase_ncpus()
                            self.reset_from_scratch()
                            self.set_status(self.S_READY, msg='increased number of cpus')
                            return
                        except ManagerIncreaseError:
                            logger.warning('increase ncpus to speed up the calculation to stay in the walltime failed')
                    # if this failed ask the task to provide a method to speed up the task
                    try:
                        self.speed_up()
                        self.reset_from_scratch()
                        self.set_status(self.S_READY, msg='task speedup')
                        return
                    except DecreaseDemandsError:
                        logger.warning('decreasing demands failed')
                    msg = ('Time cancel error detected but the time could not be increased neither could\n'
                           'the time demand be decreased by speedup of increasing the number of cpus.\n'
                           'Unrecoverable error.')
                    self.set_status(self.S_ERROR, msg)
                else:
                    msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
                    self.set_status(self.S_ERROR, msg)
        return 0
    def autoparal_run(self):
        """
        Find an optimal set of parameters for the execution of the Optic task
        This method can change the submission parameters e.g. the number of CPUs for MPI and OpenMp.
        Returns 0 if success
        """
        policy = self.manager.policy
        if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
            logger.info("Nothing to do in autoparal, returning (None, None)")
            return 0
        if policy.autoparal != 1:
            raise NotImplementedError("autoparal != 1")
        ############################################################################
        # Run ABINIT in sequential to get the possible configurations with max_ncpus
        ############################################################################
        # Set the variables for automatic parallelization
        # Will get all the possible configurations up to max_ncpus
        # Return immediately if max_ncpus == 1
        max_ncpus = self.manager.max_cores
        if max_ncpus == 1: return 0
        autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
        self.set_vars(autoparal_vars)
        # Run the job in a shell subprocess with mpi_procs = 1
        # we don't want to make a request to the queue manager for this simple job!
        # Return code is always != 0
        process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
        self.history.pop()
        retcode = process.wait()
        # Remove the variables added for the automatic parallelization
        self.input.remove_vars(autoparal_vars.keys())
        ##############################################################
        # Parse the autoparal configurations from the main output file
        ##############################################################
        parser = ParalHintsParser()
        try:
            pconfs = parser.parse(self.output_file.path)
        except parser.Error:
            logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
            return 2
        ######################################################
        # Select the optimal configuration according to policy
        ######################################################
        #optconf = self.find_optconf(pconfs)
        # Select the partition on which we'll be running and set MPI/OMP cores.
        optconf = self.manager.select_qadapter(pconfs)
        ####################################################
        # Change the input file and/or the submission script
        ####################################################
        self.set_vars(optconf.vars)
        # Write autoparal configurations to JSON file.
        d = pconfs.as_dict()
        d["optimal_conf"] = optconf
        json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
        ##############
        # Finalization
        ##############
        # Reset the status, remove garbage files ...
        self.set_status(self.S_INIT, msg='finished auto paralell')
        # Remove the output file since Abinit likes to create new files
        # with extension .outA, .outB if the file already exists.
        os.remove(self.output_file.path)
        #os.remove(self.log_file.path)
        os.remove(self.stderr_file.path)
        return 0
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
# This is not very elegant! A possible approach could to be path self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
    def setup(self):
        """Public method called before submitting the task."""
        # Intentionally a no-op: anaddb needs no preparation beyond the
        # files file produced by filesfile_string.
    def make_links(self):
        """
        Anaddb allows the user to specify the paths of the input files directly,
        hence we don't need to create symbolic links.
        """
        # Intentionally a no-op (see docstring).
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
if not phbst_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
if not phdos_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
    def get_results(self, **kwargs):
        """Return the task results; delegates entirely to the base class
        (no anaddb-specific entries are added here)."""
        results = super(AnaddbTask, self).get_results(**kwargs)
        return results
| mit |
QuLogic/iris | lib/iris/tests/unit/analysis/cartography/test__quadrant_area.py | 6 | 5433 | # (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.analysis.cartography._quadrant_area` function"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import cf_units
import numpy as np
import iris
from iris.analysis.cartography import _quadrant_area
from iris.analysis.cartography import DEFAULT_SPHERICAL_EARTH_RADIUS
class TestExampleCases(tests.IrisTest):
    """Spot-check _quadrant_area against precomputed reference values."""

    def _radian_bounds(self, coord_list, dtype):
        # Convert degree bounds (promoted to at least 2-D) to radians.
        bound_deg = np.array(coord_list, dtype=dtype)
        bound_deg = np.atleast_2d(bound_deg)
        degrees = cf_units.Unit("degrees")
        radians = cf_units.Unit("radians")
        return degrees.convert(bound_deg, radians)

    def _as_bounded_coords(self, lats, lons, dtype=np.float64):
        # Build matching (lat, lon) radian bounds arrays.
        return (self._radian_bounds(lats, dtype=dtype),
                self._radian_bounds(lons, dtype=dtype))

    def test_area_in_north(self):
        lats, lons = self._as_bounded_coords([0, 10], [0, 10])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[1228800593851.443115234375]])

    def test_area_in_far_north(self):
        lats, lons = self._as_bounded_coords([70, 80], [0, 10])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[319251845980.7646484375]])

    def test_area_in_far_south(self):
        lats, lons = self._as_bounded_coords([-80, -70], [0, 10])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[319251845980.763671875]])

    def test_area_in_north_with_reversed_lats(self):
        # Reversing the latitude bounds must not change the computed area.
        lats, lons = self._as_bounded_coords([10, 0], [0, 10])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[1228800593851.443115234375]])

    def test_area_multiple_lats(self):
        lats, lons = self._as_bounded_coords([[-80, -70], [0, 10], [70, 80]],
                                             [0, 10])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[319251845980.763671875],
                                        [1228800593851.443115234375],
                                        [319251845980.7646484375]])

    def test_area_multiple_lats_and_lons(self):
        lats, lons = self._as_bounded_coords([[-80, -70], [0, 10], [70, 80]],
                                             [[0, 10], [10, 30]])
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, [[3.19251846e+11, 6.38503692e+11],
                                        [1.22880059e+12, 2.45760119e+12],
                                        [3.19251846e+11, 6.38503692e+11]])

    def test_symmetric_64_bit(self):
        # Pole-adjacent cells must be symmetric about the equator (float64).
        lats, lons = self._as_bounded_coords([[-90, -89.375],
                                              [89.375, 90]],
                                             [0, 10],
                                             dtype=np.float64)
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, area[::-1])

    def test_symmetric_32_bit(self):
        # ... and likewise at float32 precision.
        lats, lons = self._as_bounded_coords([[-90, -89.375],
                                              [89.375, 90]],
                                             [0, 10],
                                             dtype=np.float32)
        area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
        self.assertArrayAllClose(area, area[::-1])
class TestErrorHandling(tests.IrisTest):
    """Check that _quadrant_area rejects malformed bounds arrays."""

    def test_lat_bounds_1d_error(self):
        self._assert_error_on_malformed_bounds(
            [0, 10],
            [[0, 10]])

    def test_lon_bounds_1d_error(self):
        self._assert_error_on_malformed_bounds(
            [[0, 10]],
            [0, 10])

    def test_too_many_lat_bounds_error(self):
        self._assert_error_on_malformed_bounds(
            [[0, 10, 20]],
            [[0, 10]])

    def test_too_many_lon_bounds_error(self):
        self._assert_error_on_malformed_bounds(
            [[0, 10]],
            [[0, 10, 20]])

    def _assert_error_on_malformed_bounds(self, lat_bnds, lon_bnds):
        # Use a raw string for the regexp: '\[' in a plain string literal
        # is an invalid escape sequence (DeprecationWarning on Python 3.6+).
        with self.assertRaisesRegexp(ValueError,
                                     r'Bounds must be \[n,2\] array'):
            _quadrant_area(np.array(lat_bnds),
                           np.array(lon_bnds),
                           1.)
if __name__ == '__main__':
tests.main()
| gpl-3.0 |
sorenk/ansible | lib/ansible/plugins/lookup/password.py | 20 | 12097 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2013, Javier Candeira <javier@candeira.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: password
version_added: "1.1"
author:
- Daniel Hokka Zakrisson <daniel@hozac.com>
- Javier Candeira <javier@candeira.com>
- Maykel Moya <mmoya@speedyrails.com>
short_description: retrieve or generate a random password, stored in a file
description:
- Generates a random plaintext password and stores it in a file at a given filepath.
- If the file exists previously, it will retrieve its contents, behaving just like with_file.
- 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
which simplifies password management in C("host_vars") variables.'
- A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
options:
_terms:
description:
- path to the file that stores/will store the passwords
required: True
encrypt:
description:
- Whether the user requests that this password is returned encrypted or in plain text.
- Note that the password is always stored as plain text.
- Encrypt also forces saving the salt value for idempotence.
type: boolean
default: True
chars:
version_added: "1.4"
description:
- Define comma separated list of names that compose a custom character set in the generated passwords.
- 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9 and punctuation (". , : - _").'
- "They can be either parts of Python's string module attributes (ascii_letters,digits, etc) or are used literally ( :, -)."
- "To enter comma use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported."
type: string
length:
description: The length of the generated password.
default: 20
type: integer
notes:
- A great alternative to the password lookup plugin,
if you don't need to generate random passwords on a per-host basis,
would be to use Vault in playbooks.
Read the documentation there and consider using it first,
it will be more desirable for most applications.
- If the file already exists, no data will be written to it.
If the file has contents, those contents will be read in as the password.
Empty files cause the password to return as an empty string.
- 'As all lookups, this runs on the Ansible host as the user running the playbook, and "become" does not apply,
the target file must be readable by the playbook user, or, if it does not exist,
the playbook user must have sufficient privileges to create it.
(So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
"""
EXAMPLES = """
- name: create a mysql user with a random password
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword length=15') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using only ascii letters
mysql_user: name={{ client }} password="{{ lookup('password', '/tmp/passwordfile chars=ascii_letters') }}" priv='{{ client }}_{{ tier }}_{{ role }}.*:ALL'
- name: create a mysql user with a random password using only digits
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', '/tmp/passwordfile chars=digits') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using many different char sets
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import string
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.utils.encrypt import do_encrypt, random_password
from ansible.utils.path import makedirs_safe
DEFAULT_LENGTH = 20
VALID_PARAMS = frozenset(('length', 'encrypt', 'chars'))
def _parse_parameters(term):
    """Hacky parsing of params

    See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
    and the first_found lookup for how we want to fix this later
    """
    # Split off the path; everything after the first space is key=value data.
    first_split = term.split(' ', 1)
    if len(first_split) <= 1:
        # Only a single argument given, therefore it's a path
        relpath = term
        params = dict()
    else:
        relpath = first_split[0]
        params = parse_kv(first_split[1])
        if '_raw_params' in params:
            # Spaces in the path?
            relpath = u' '.join((relpath, params['_raw_params']))
            del params['_raw_params']

            # Check that we parsed the params correctly
            if not term.startswith(relpath):
                # Likely, the user had a non parameter following a parameter.
                # Reject this as a user typo
                raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
        # No _raw_params means we already found the complete path when
        # we split it initially

    # Check for invalid parameters.  Probably a user typo
    invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
    if invalid_params:
        raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))

    # Set defaults
    params['length'] = int(params.get('length', DEFAULT_LENGTH))
    params['encrypt'] = params.get('encrypt', None)

    params['chars'] = params.get('chars', None)
    if params['chars']:
        tmp_chars = []
        # ',,' is the escape for a literal comma inside the chars spec.
        if u',,' in params['chars']:
            tmp_chars.append(u',')
        tmp_chars.extend(c for c in params['chars'].replace(u',,', u',').split(u',') if c)
        params['chars'] = tmp_chars
    else:
        # Default chars for password
        params['chars'] = [u'ascii_letters', u'digits', u".,:-_"]

    return relpath, params
def _read_password_file(b_path):
    """Return the contents of the password file at *b_path*.

    :arg b_path: byte string path to the password file.
    :returns: the file's contents as a text string (trailing whitespace
        stripped), or None when no password file exists.
    """
    if not os.path.exists(b_path):
        return None
    with open(b_path, 'rb') as f:
        stripped = f.read().rstrip()
    return to_text(stripped, errors='surrogate_or_strict')
def _gen_candidate_chars(characters):
    '''Expand a list of character specs into one string of candidate chars.

    Each entry in ``characters`` is either the name of an attribute of the
    :mod:`string` module (e.g. ``'digits'``, ``'ascii_letters'``), whose
    value is included, or an arbitrary string that is included verbatim.

    For example ``['digits', '?|']`` yields ``u'0123456789?|'``.

    Single and double quotes are always stripped from the result.
    '''
    # getattr falls back to the spec itself when it is not a name in the
    # string module, so literal strings pass straight through.
    expanded = [
        to_text(getattr(string, to_native(spec), spec), errors='strict')
        for spec in characters
    ]
    return u''.join(expanded).replace(u'"', u'').replace(u"'", u'')
def _random_salt():
    """Return a text string suitable for use as a hash salt."""
    # passlib requires pure-ASCII salt values, so the alphabet is fixed
    # here rather than user-configurable.
    return random_password(
        length=8,
        chars=_gen_candidate_chars(['ascii_letters', 'digits', './']))
def _parse_content(content):
'''parse our password data format into password and salt
:arg content: The data read from the file
:returns: password and salt
'''
password = content
salt = None
salt_slug = u' salt='
try:
sep = content.rindex(salt_slug)
except ValueError:
# No salt
pass
else:
salt = password[sep + len(salt_slug):]
password = content[:sep]
return password, salt
def _format_content(password, salt, encrypt=True):
"""Format the password and salt for saving
:arg password: the plaintext password to save
:arg salt: the salt to use when encrypting a password
:arg encrypt: Whether the user requests that this password is encrypted.
Note that the password is saved in clear. Encrypt just tells us if we
must save the salt value for idempotence. Defaults to True.
:returns: a text string containing the formatted information
.. warning:: Passwords are saved in clear. This is because the playbooks
expect to get cleartext passwords from this lookup.
"""
if not encrypt and not salt:
return password
# At this point, the calling code should have assured us that there is a salt value.
if not salt:
raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
return u'%s salt=%s' % (password, salt)
def _write_password_file(b_path, content):
    """Write *content* (plus a trailing newline) to the file at *b_path*,
    creating the containing directory with user-only permissions."""
    b_pathdir = os.path.dirname(b_path)
    makedirs_safe(b_pathdir, mode=0o700)

    with open(b_path, 'wb') as f:
        # Restrict permissions before any secret content is written.
        os.chmod(b_path, 0o600)
        b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
        f.write(b_content)
class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):
        """Generate (or read back) one password per term."""
        ret = []

        for term in terms:
            relpath, params = _parse_parameters(term)
            path = self._loader.path_dwim(relpath)
            b_path = to_bytes(path, errors='surrogate_or_strict')
            chars = _gen_candidate_chars(params['chars'])

            changed = False
            content = _read_password_file(b_path)

            # A missing file -- or the special path /dev/null -- always
            # yields a freshly generated password.
            if content is None or b_path == to_bytes('/dev/null'):
                plaintext_password = random_password(params['length'], chars)
                salt = None
                changed = True
            else:
                plaintext_password, salt = _parse_content(content)

            # Encryption needs a salt for idempotent output; create one
            # lazily if the stored content didn't carry it.
            if params['encrypt'] and not salt:
                changed = True
                salt = _random_salt()

            # Persist new/updated content, but never write to /dev/null.
            if changed and b_path != to_bytes('/dev/null'):
                content = _format_content(plaintext_password, salt, encrypt=params['encrypt'])
                _write_password_file(b_path, content)

            if params['encrypt']:
                password = do_encrypt(plaintext_password, params['encrypt'], salt=salt)
                ret.append(password)
            else:
                ret.append(plaintext_password)

        return ret
| gpl-3.0 |
stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_dns_domain_dns_domain.py | 2 | 1383 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show dns_domain --dns_domain`."""
from sqlalchemy.orm import undefer
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.formats.dns_domain import DNSDomainList
from aquilon.aqdb.model import DnsDomain
class CommandShowDnsDomainDnsDomain(BrokerCommand):
    """Render the output of ``aq show dns_domain --dns_domain``."""

    required_parameters = ["dns_domain"]

    def render(self, session, dns_domain, **arguments):
        # Load the (normally deferred) comments column eagerly.
        query_options = [undefer('comments')]
        domain = DnsDomain.get_unique(session, dns_domain, compel=True,
                                      query_options=query_options)
        return DNSDomainList([domain])
| apache-2.0 |
gunzy83/ansible-modules-extras | network/wakeonlan.py | 28 | 3882 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: wakeonlan
version_added: 2.2
short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
description:
- The M(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
options:
mac:
description:
- MAC address to send Wake-on-LAN broadcast packet for
required: true
default: null
broadcast:
description:
- Network broadcast address to use for broadcasting magic Wake-on-LAN packet
required: false
default: 255.255.255.255
port:
description:
- UDP port to use for magic Wake-on-LAN packet
required: false
default: 7
author: "Dag Wieers (@dagwieers)"
todo:
- Add arping support to check whether the system is up (before and after)
- Enable check-mode support (when we have arping support)
- Does not have SecureOn password support
notes:
- This module sends a magic packet, without knowing whether it worked
- Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
- Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off
'''
EXAMPLES = '''
# Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
- local_action: wakeonlan mac=00:00:5E:00:53:66 broadcast=192.0.2.23
- wakeonlan: mac=00:00:5E:00:53:66 port=9
delegate_to: localhost
'''
RETURN='''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import socket
import struct
def wakeonlan(module, mac, broadcast, port):
    """Validate *mac* and send a magic Wake-on-LAN packet for it.

    :arg module: AnsibleModule-like object; only ``fail_json`` is used,
        and it is expected not to return.
    :arg mac: target MAC address, with or without separators.
    :arg broadcast: destination address for the UDP datagram.
    :arg port: destination UDP port.
    """
    mac_orig = mac

    # Remove a possible separator (':', '-', '.') from the MAC address.
    # Only the characters at the separator positions (2, 5, 8, 11, 14) are
    # dropped, so a separator that happens to be a hex digit can no longer
    # corrupt the address (the old mac.replace(mac[2], '') approach could).
    if len(mac) == 12 + 5:
        mac = ''.join([mac[i:i + 2] for i in range(0, 17, 3)])

    # If we don't end up with 12 hexadecimal characters, fail
    if len(mac) != 12:
        module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)

    # Test if it converts to an integer, otherwise fail
    try:
        int(mac, 16)
    except ValueError:
        module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)

    # Magic packet payload: 6 x 0xFF followed by repetitions of the MAC.
    # Build it as bytes so this works on both Python 2 (where b'' == '')
    # and Python 3, where joining str with struct.pack() output fails.
    padding = ''.join(['FFFFFFFFFFFF', mac * 20])
    data = b''.join([struct.pack('B', int(padding[i: i + 2], 16))
                     for i in range(0, len(padding), 2)])

    # Broadcast payload to network
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    try:
        sock.sendto(data, (broadcast, port))
    except socket.error as e:
        # 'as e' works on Python 2.6+ and drops the get_exception() shim.
        module.fail_json(msg=str(e))
    finally:
        # The original leaked the socket; always close it.
        sock.close()
def main():
    """Module entry point: parse arguments and send the magic packet."""
    module = AnsibleModule(
        argument_spec = dict(
            mac = dict(required=True, type='str'),
            broadcast = dict(required=False, default='255.255.255.255'),
            port = dict(required=False, type='int', default=7),
        ),
    )

    mac = module.params.get('mac')
    broadcast = module.params.get('broadcast')
    port = module.params.get('port')

    wakeonlan(module, mac, broadcast, port)
    # wakeonlan() only returns on success (validation/socket failures call
    # module.fail_json), so reaching this point means the packet went out.
    module.exit_json(changed=True)
if __name__ == '__main__':
main()
| gpl-3.0 |
ivanprjcts/equinox-spring16-API | makesdks/swagger_reader.py | 1 | 1557 | import os
import json
from sdklib.util.urlvalidator import urlsplit
class SwaggerReader(object):
    """Load Swagger JSON API description files from a directory and merge
    them into a single model dict suitable for SDK generation."""

    def __init__(self, folder):
        """Parse every file in *folder* as a JSON Swagger API description.

        :param folder: directory containing the Swagger JSON files.
        """
        self.apis = []
        for file_elem in os.listdir(folder):
            file_path = '%s/%s' % (folder, file_elem)
            # print() with a single argument works on Python 2 and 3.
            print(file_path)
            # Use a context manager so the handle is closed even when
            # json.loads raises (the original leaked it in that case).
            with open(file_path, 'r') as f:
                read_file = f.read().replace('\n', '')
            self.apis.append(json.loads(read_file))

    def read_json_apis(self):
        """Merge all loaded APIs into one dict.

        The result holds the scheme/host/port taken from the first API that
        provides them, plus a ``urls`` mapping of path -> {HTTP method ->
        parameter list}.
        """
        json_model = dict()
        for api in self.apis:
            scheme, host, port = urlsplit(api["basePath"])
            # Only the first API seen contributes each global value.
            if scheme and "scheme" not in json_model:
                json_model["scheme"] = scheme
            if host and "apiHost" not in json_model:
                json_model["apiHost"] = host
            if port and "port" not in json_model:
                json_model["port"] = port
            for method in api["apis"]:
                if "urls" not in json_model:
                    json_model["urls"] = dict()
                methods = dict()
                for operation in method["operations"]:
                    methods[operation["method"]] = operation["parameters"]
                json_model["urls"][method["path"]] = methods
        return json_model
if __name__ == "__main__":
r = SwaggerReader('json/swagger')
s = r.read_json_apis()
f = open('json/swaggerApi.json', "w")
f.write(json.dumps(s))
f.close()
print json.dumps(s)
| lgpl-3.0 |
braingram/pysump | sump/interface.py | 1 | 10286 | #!/usr/bin/env python
# import sys
import logging
import struct
import serial
from . import errors
from . import fio
from . import ops
# import settings this as settings_module to avoid name conflicts
from . import settings as settings_module
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class Interface(object):
    """Driver for a SUMP-protocol logic analyzer attached to a serial port.

    Handles device reset, identification/metadata queries, pushing the
    trigger/sampling configuration, and reading back captured samples.
    """

    # undivided clock rate, in Hz, from testing with OBLS
    protocol_version = '1.0'

    def __init__(
            self, path='/dev/ttyACM0', baud=115200,
            timeout=None, settings=None, **kwargs):
        """Open the port, reset the device, read metadata, push settings.

        :param path: serial device node.
        :param baud: serial baud rate.
        :param timeout: serial read timeout in seconds (None blocks).
        :param settings: optional pre-built Settings instance; when omitted
            one is created from **kwargs.  When both are given, the kwargs
            override attributes on the supplied settings object.
        """
        self.timeout = timeout
        if settings is None:
            self.settings = settings_module.Settings(**kwargs)
        else:
            self.settings = settings
            for kw in kwargs:
                setattr(self.settings, kw, kwargs[kw])
        self.port = serial.Serial(path, baud, timeout=self.timeout)
        self.debug_logger = None
        self.reset()
        self.metadata = self.query_metadata()
        self.send_settings()

    def reset(self):
        """Send five 0x00 bytes to force the SUMP state machine idle."""
        logger.debug("reset")
        self.port.write('\x00\x00\x00\x00\x00')

    def capture(self, send_settings=True):
        '''Run a capture and return the samples as a list of ints.

        :param send_settings: push ``self.settings`` to the device first.
        '''
        logger.debug("capture")
        if send_settings:
            self.send_settings()

        # Build one byte-unpack function per *enabled* channel group: a
        # cleared bit in channel_groups means that group's byte is present
        # in the sample stream.
        logger.debug("building unpack functions")
        ufs = []
        for i in xrange(4):
            if not (self.settings.channel_groups & (0b1 << i)):
                # si=i binds the loop variable at definition time.
                ufs.append(lambda c, si=i: ord(c) << (8 * si))

        d = []
        self.port.timeout = self.settings.timeout
        logger.debug("starting capture")
        self.port.write('\x01')  # start the capture
        logger.debug("reading capture")
        for i in xrange(self.settings.read_count):
            v = 0
            for uf in ufs:
                v |= uf(self.port.read(1))
            d.append(v)
        self.reset()  # TODO is this needed?
        if self.settings.latest_first:
            return d[::-1]
        else:
            return d

    def save(self, capture, filename, meta=None):
        """Persist a capture (plus settings and optional metadata) to disk."""
        logger.debug("save %s", filename)
        fio.save(capture, filename, self.settings, meta)

    def id_string(self):
        '''Return device's SUMP ID string.'''
        logger.debug("id_string")
        self.port.write('\x02')
        # TODO check protocol version here
        val = self.port.read(4)  # 4 bytes as a small-endian int
        return val[::-1]

    def xon(self):
        """Resume device transmission (XON)."""
        logger.debug("xon")
        self.port.write('\x11')

    def xoff(self):
        """Pause device transmission (XOFF)."""
        logger.debug("xoff")
        self.port.write('\x13')

    def _send_trigger_mask(self, stage, mask):
        """Send the 32-bit trigger mask for *stage*."""
        logger.debug("send_trigger_mask %s %s", stage, mask)
        # 'I' (unsigned): the old '<Bi' raised struct.error for masks with
        # bit 31 set.
        msg = struct.pack('<BI', 0xC0 | (stage << 2), mask & 0xFFFFFFFF)
        self.port.write(msg)

    def _send_trigger_value(self, stage, value):
        """Send the 32-bit trigger match value for *stage*."""
        logger.debug("send_trigger_value %s %s", stage, value)
        # 'I' (unsigned) for the same reason as in _send_trigger_mask.
        msg = struct.pack('<BI', 0xC1 | (stage << 2), value & 0xFFFFFFFF)
        self.port.write(msg)

    def _send_trigger_configuration(
            self, stage, delay, channel, level, start, serial):
        """Send the configuration word (delay/channel/flags) for *stage*."""
        logger.debug(
            "send_trigger_configuration %s %s %s %s %s %s",
            stage, delay, channel, level, start, serial)
        msg = struct.pack(
            '<BHBB',
            0xC2 | (stage << 2),
            delay,
            ((channel & 0x0F) << 4) | level,
            (start << 3) | (serial << 2) | ((channel & 0x10) >> 4))
        self.port.write(msg)

    def send_divider_settings(self, settings):
        """Send the sample-clock divider (the device stores divider - 1)."""
        logger.debug("send_divider_settings %s", settings.divider)
        d = settings.divider - 1  # offset 1 correction for SUMP hardware
        msg = struct.pack('<cHBx', '\x80', d & 0xFFFF, d >> 16)
        self.port.write(msg)

    def send_read_and_delay_count_settings(self, settings):
        """Send read/delay counts; the device works in units of 4 samples,
        so both counts are rounded down and written back to *settings*."""
        logger.debug("send_read_and_delay_count_settings")
        r = (settings.read_count // 4)
        settings.read_count = r * 4
        d = (settings.delay_count // 4)
        settings.delay_count = d * 4
        msg = struct.pack('<cHH', '\x81', r, d)
        self.port.write(msg)

    def send_flags_settings(self, settings):
        """Send the flags byte (inversion, external clock, channel groups,
        noise filter, demux); RLE/test-mode bytes stay zero."""
        logger.debug("send_flag_settings")
        msg = struct.pack(
            '<cBxxx', '\x82',
            (settings.inverted << 7) | (settings.external << 6) |
            (settings.channel_groups << 2) | (settings.filter << 1) |
            settings.demux)
        self.port.write(msg)

    def send_settings(self):
        """Push the full configuration to the device.

        The order of operations in this method is CRITICAL: divider first,
        then the trigger stages, then read/delay counts, then flags.
        """
        logger.debug("send_settings")
        self.send_divider_settings(self.settings)

        trigger_enable = self.settings.trigger_enable
        if trigger_enable == 'None':
            # send always-trigger trigger settings
            for stage in xrange(self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, True, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Simple':
            # set settings from stage 0, no-op for stages 1..3
            self._send_trigger_configuration(
                0, self.settings.trigger_stages[0].delay,
                self.settings.trigger_stages[0].channel,
                0, True, self.settings.trigger_stages[0].serial)
            self._send_trigger_mask(0, self.settings.trigger_stages[0].mask)
            self._send_trigger_value(0, self.settings.trigger_stages[0].value)
            # BUGFIX: this used to read "self.self.settings", which raised
            # AttributeError whenever a Simple trigger was configured.
            for stage in xrange(1, self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, False, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Complex':
            for (i, stage) in enumerate(self.settings.trigger_stages):
                # OLS needs things in this order
                self._send_trigger_mask(i, stage.mask)
                self._send_trigger_value(i, stage.value)
                self._send_trigger_configuration(
                    i, stage.delay, stage.channel, stage.level, stage.start,
                    stage.serial)
        else:
            raise errors.TriggerEnableError

        self.send_read_and_delay_count_settings(self.settings)
        self.send_flags_settings(self.settings)

    def query_metadata(self):
        '''Return metadata identifying the SUMP device,
        firmware, version, etc.'''
        logger.debug("query_metadata")
        result = []
        self.reset()
        r = self.port.read
        timeout = self.port.timeout  # save timeout setting to restore later
        try:
            # only wait 2 seconds for devices that don't do metadata
            self.port.timeout = 2
            self.port.write('\x04')
            while True:
                token = r(1)
                if not token:  # end-of-file
                    break
                token = ord(token)
                if not token:  # binary 0 end-of-metadata marker
                    break
                elif token <= 0x1F:  # C-string follows token
                    v = []
                    while True:
                        x = r(1)
                        if x != '\0':
                            v.append(x)
                        else:
                            break
                    result.append((token, ''.join(v)))
                elif token <= 0x3F:  # 32-bit int follows token
                    result.append((token, ops.big_endian(r(4))))
                elif token <= 0x5F:  # 8-bit int follows token
                    result.append((token, ord(r(1))))
                else:  # token carries no payload
                    result.append((token, None))
        finally:
            self.port.timeout = timeout  # restore timeout setting
        return result

    def close(self):
        """Close the serial port and drop the reference."""
        logger.debug("close")
        self.port.close()
        self.port = None
def open_interface(port='/dev/ttyACM0', baud=115200, **kwargs):
    """Convenience wrapper: construct and return an :class:`Interface`."""
    return Interface(port, baud, **kwargs)
| gpl-3.0 |
vmalloc/dessert | tests/test_dessert.py | 1 | 3611 | from _pytest.assertion.rewrite import AssertionRewritingHook as PytestRewriteHook
import os
import shutil
import sys
from contextlib import contextmanager
from tempfile import mkdtemp
import emport
import dessert
import pytest
def test_dessert(module):
    """The rewritten assertion in module.func() fails with an
    introspected message."""
    with pytest.raises(AssertionError) as error:
        module.func()

    # The failure message carries the 'dessert*' marker (presumably added
    # via _MARK_ASSERTION_INTROSPECTION -- confirm), a "where" clause and
    # the re-rendered "+" sub-expression.
    assert 'dessert*' in str(error.value)
    assert "where" in str(error.value)
    assert "+" in str(error.value)
def test_disable_introspection(add_assert_message, module, assert_message):
    """With introspection disabled, the user message (when present)
    replaces the introspected expression details."""
    with _disable_introspection():
        with pytest.raises(AssertionError) as error:
            module.func()

    if not add_assert_message:
        # No user message: the introspected details still appear.
        assert 'dessert*' in str(error.value)
        assert "where" in str(error.value)
        assert "+" in str(error.value)
    else:
        # A user-supplied message suppresses the expression re-rendering.
        assert assert_message in str(error.value)
        assert "+" not in str(error.value)
def test_warnings_from_rewrite(source_filename):
    """Warnings raised at import time of a rewritten module are attributed
    to the module's own file, not to the rewriting machinery."""
    tmp_dir = os.path.dirname(source_filename)
    full_path = os.path.join(tmp_dir, 'file_with_warnings.py')
    with open(full_path, "w") as f:
        f.write(r"""
import warnings

warnings.simplefilter('always')
warnings.warn('Some import warning')

def func():
    assert True
""")

    with dessert.rewrite_assertions_context():
        with _disable_pytest_rewriting():
            with pytest.warns(None) as caught:
                emport.import_file(full_path)
    # Exactly one warning, attributed to the imported file itself.
    [warning] = caught.list
    assert warning.filename == full_path
@pytest.fixture(scope='session', autouse=True)
def mark_dessert():
    """Session-wide: turn on dessert's introspection marking, which the
    tests above rely on when matching failure messages."""
    # pylint: disable=protected-access
    assert not dessert.rewrite._MARK_ASSERTION_INTROSPECTION
    dessert.rewrite._MARK_ASSERTION_INTROSPECTION = True
@pytest.fixture
def module(request, source_filename):
    """Import the generated source file with dessert's rewriting enabled.

    The module is removed from ``sys.modules`` again when the test finishes.
    """
    with dessert.rewrite_assertions_context():
        with _disable_pytest_rewriting():
            imported = emport.import_file(source_filename)
    request.addfinalizer(lambda: sys.modules.pop(imported.__name__))
    return imported
@contextmanager
def _disable_pytest_rewriting():
    """Temporarily strip pytest's own assertion-rewrite hook from ``sys.meta_path``.

    This guarantees that modules imported inside the block are rewritten by
    dessert only, not by pytest.
    """
    old_meta_path = sys.meta_path[:]  # shallow copy so we can restore exactly
    try:
        # iterate in reverse so pop() does not shift indices we still need
        for index, plugin in reversed(list(enumerate(sys.meta_path))):
            if isinstance(plugin, PytestRewriteHook):
                sys.meta_path.pop(index)
        yield
    finally:
        sys.meta_path[:] = old_meta_path  # in-place restore keeps aliases valid
@contextmanager
def _disable_introspection():
    """Context manager disabling dessert's message introspection for its body."""
    dessert.disable_message_introspection()
    try:
        yield
    finally:
        # always re-enable, even if the body raised
        dessert.enable_message_introspection()
@pytest.fixture(params=[
    "assert x() + y()",
    "assert f(1) > g(100)",
    "assert f(g(2)) == f(g(1))",
])
def assertion_line(request):
    """Parametrized failing assertion to embed in the generated module."""
    return request.param


@pytest.fixture(params=[True, False])
def add_assert_message(request):
    """Whether the generated assertion carries an explicit message argument."""
    return request.param


@pytest.fixture
def assert_message(request):
    """The explicit message text used when ``add_assert_message`` is True."""
    return 'msg'
@pytest.fixture
def source(assertion_line, add_assert_message, assert_message):
    """Build the source of a module whose ``func()`` fails the requested assertion."""
    if add_assert_message:
        assertion_line += ", '{}'".format(assert_message)
    # f/g/h return their argument; x/y are nullary helpers used by the assertions
    returned = """def f(x):
    return x
x = lambda: 1
y = lambda: -1
g = h = f
def func():
    variable = False
    {0}
""".format(assertion_line)
    return returned
@pytest.fixture
def source_filename(request, source):
    """Write the generated source into a fresh temp dir and return the file path.

    The whole directory is removed again when the test finishes.
    """
    tmp_dir = mkdtemp()
    request.addfinalizer(lambda: shutil.rmtree(tmp_dir))
    target = os.path.join(tmp_dir, "sourcefile.py")
    with open(target, "w") as f:
        f.write(source)
    return target
| mit |
wpoa/wiki-imports | lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Language model for Bulgarian text encoded as ISO-8859-5 (Latin/Cyrillic).
Latin5BulgarianModel = {
    'charToOrderMap': Latin5_BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    # fraction of observed character sequences covered by the first 512 entries
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-5"
}
# Language model for Bulgarian text encoded as windows-1251; shares the
# precedence matrix with the Latin-5 model, only the byte-order map differs.
Win1251BulgarianModel = {
    'charToOrderMap': win1251BulgarianCharToOrderMap,
    'precedenceMatrix': BulgarianLangModel,
    # fraction of observed character sequences covered by the first 512 entries
    'mTypicalPositiveRatio': 0.969392,
    'keepEnglishLetter': False,
    'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-3.0 |
oculusstorystudio/kraken | Python/kraken/plugins/canvas_plugin/graph_manager.py | 1 | 20242 | """Kraken Canvas - Canvas Graph Manager module.
Classes:
GraphManager -- Node management.
"""
import json
from kraken.core.kraken_system import ks
# import FabricEngine.Core as core
class GraphManager(object):
"""Manager object for taking care of all low level Canvas tasks"""
__dfgHost = None
__dfgBinding = None
__dfgArgs = None
__dfgExec = None
__dfgNodes = None
__dfgNodeAndPortMap = None
__dfgConnections = None
__dfgGroups = None
__dfgGroupNames = None
__dfgCurrentGroup = None
def __init__(self):
super(GraphManager, self).__init__()
client = ks.getCoreClient()
ks.loadExtension('KrakenForCanvas')
self.__dfgHost = client.getDFGHost()
self.__dfgBinding = self.__dfgHost.createBindingToNewGraph()
self.__dfgExec = self.__dfgBinding.getExec()
self.__dfgArgs = {}
self.__dfgNodes = {}
self.__dfgNodeAndPortMap = {}
self.__dfgConnections = {}
self.__dfgGroups = {}
self.__dfgGroupNames = []
self.__dfgCurrentGroup = None
# ===============
# Canvas Methods
# ===============
def setTitle(self, title):
self.__dfgExec.setTitle(title)
def getUniqueTitle(self, path, title):
titleSuffix = 1
uniqueTitle = title
lookup = '%s|%s' % (path, uniqueTitle)
while self.__dfgNodes.has_key(lookup):
titleSuffix = titleSuffix + 1
uniqueTitle = '%s_%d' % (title, titleSuffix)
lookup = '%s|%s' % (path, uniqueTitle)
return uniqueTitle
def addExtDep(self, extDep):
self.__dfgExec.addExtDep(extDep)
def hasNode(self, path, title=None):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
return lookup in self.__dfgNodes
def hasNodeSI(self, kSceneItem, title=None):
return self.hasNode(kSceneItem.getPath(), title=title)
def getNode(self, path, title=None):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
return self.__dfgNodes.get(lookup, None)
def getNodeSI(self, kSceneItem, title=None):
return self.getNode(kSceneItem.getPath(), title=title)
def getNodeAndPort(self, path, asInput=True):
if path not in self.__dfgNodeAndPortMap:
return None
nodeAndPort = self.__dfgNodeAndPortMap[path]
if asInput:
return nodeAndPort[0]
return nodeAndPort[1]
def getNodeAndPortSI(self, kSceneItem, asInput=True):
return self.getNodeAndPort(kSceneItem.getPath(), asInput=asInput)
def setNodeAndPort(self, path, node, port, asInput=False):
nodeAndPort = self.__dfgNodeAndPortMap.get(path, [(node, port), (node, port)])
if asInput:
nodeAndPort[0] = (node, port)
else:
nodeAndPort[1] = (node, port)
self.__dfgNodeAndPortMap[path] = nodeAndPort
def setNodeAndPortSI(self, kSceneItem, node, port, asInput=False):
self.setNodeAndPort(kSceneItem.getPath(), node, port, asInput=asInput)
def getExec(self):
return self.__dfgExec
def getSubExec(self, node):
return self.__dfgExec.getSubExec(node)
def hasArgument(self, name):
return self.__dfgArgs.has_key(name)
def getOrCreateArgument(self, name, dataType=None, defaultValue=None, portType="In"):
if self.__dfgArgs.has_key(name):
return self.__dfgArgs[name]
client = ks.getCoreClient()
dfgPortType = client.DFG.PortTypes.In
if portType.lower() == 'out':
dfgPortType = client.DFG.PortTypes.Out
elif portType.lower() == 'io':
dfgPortType = client.DFG.PortTypes.IO
self.__dfgArgs[name] = self.__dfgExec.addExecPort(name, dfgPortType)
if dataType:
self.__dfgBinding.setArgValue(self.__dfgArgs[name], ks.rtVal(dataType, defaultValue))
return self.__dfgArgs[name]
def removeArgument(self, name):
if name not in self.__dfgArgs:
return False
self.__dfgExec.removeExecPort(self.__dfgArgs[name])
del self.__dfgArgs[name]
return True
def createNodeFromPreset(self, path, preset, title=None, **metaData):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
if lookup in self.__dfgNodes:
raise Exception("Node for %s already exists." % lookup)
node = self.__dfgExec.addInstFromPreset(preset)
self.__dfgNodes[lookup] = node
self.setNodeMetaDataFromDict(lookup, metaData)
self.__addNodeToGroup(node)
return node
def createNodeFromPresetSI(self, kSceneItem, preset, title=None, **metaData):
node = self.createNodeFromPreset(kSceneItem.getPath(), preset, title=title, **metaData)
self.setNodeMetaDataSI(kSceneItem, 'uiComment', kSceneItem.getPath(), title=title)
return node
def createFunctionNode(self, path, title, **metaData):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
if lookup in self.__dfgNodes:
raise Exception("Node for %s already exists." % lookup)
node = self.__dfgExec.addInstWithNewFunc(title)
self.__dfgNodes[lookup] = node
self.setNodeMetaDataFromDict(lookup, metaData)
self.__addNodeToGroup(node)
return node
def createFunctionNodeSI(self, kSceneItem, title, **metaData):
return self.createFunctionNode(kSceneItem.getPath(), title, **metaData)
def createVariableNode(self, path, title, dataType, extension="", **metaData):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
if lookup in self.__dfgNodes:
raise Exception("Node for %s already exists." % lookup)
node = self.__dfgExec.addVar(title, dataType, extension)
self.__dfgNodes[lookup] = node
self.setNodeMetaDataFromDict(lookup, metaData)
self.__addNodeToGroup(node)
return node
def createVariableNodeSI(self, kSceneItem, title, dataType, extension="", **metaData):
return self.createVariableNode(kSceneItem.getPath(), title, dataType, extension=extension, **metaData)
def removeNode(self, path, title=None):
lookup = path
if title is not None:
lookup = "%s|%s" % (path, title)
if lookup not in self.__dfgNodes:
raise Exception("Node for %s does not exist." % lookup)
node = self.__dfgNodes[lookup]
self.__dfgExec.removeNode(node)
del self.__dfgNodes[lookup]
# clean up groups
for group in self.__dfgGroups:
for i in range(len(self.__dfgGroups[group])):
if self.__dfgGroups[group][i] == node:
del self.__dfgGroups[group][i]
break
# clean up connections
if node in self.__dfgConnections:
del self.__dfgConnections[node]
for nodeName in self.__dfgConnections:
ports = self.__dfgConnections[nodeName]
for portName in ports:
connections = ports[portName]
newConnections = []
for c in connections:
if c[0] == node:
continue
newConnections += [c]
self.__dfgConnections[nodeName][portName] = newConnections
return True
def removeNodeSI(self, kSceneItem, title=None):
return self.removeNode(kSceneItem.getPath(), title=title)
def connectNodes(self, nodeA, portA, nodeB, portB):
self.removeConnection(nodeB, portB)
typeA = self.getNodePortResolvedType(nodeA, portA)
typeB = self.getNodePortResolvedType(nodeB, portB)
if typeA != typeB and typeA != None and typeB != None:
if typeA == 'Xfo' and typeB == 'Mat44':
preset = "Fabric.Exts.Math.Xfo.ToMat44"
title = self.getUniqueTitle(nodeA, 'Convert')
convertNode = self.createNodeFromPreset(nodeA, preset, title=title)
self.connectNodes(nodeA, portA, convertNode, "this")
nodeA = convertNode
portA = "result"
elif typeA == 'Mat44' and typeB == 'Xfo':
preset = "Fabric.Exts.Math.Xfo.SetFromMat44"
title = self.getUniqueTitle(nodeA, 'Convert')
convertNode = self.createNodeFromPreset(nodeA, preset, title=title)
self.connectNodes(nodeA, portA, convertNode, "m")
nodeA = convertNode
portA = "this"
else:
raise Exception('Cannot connect - incompatible type specs %s and %s.' % (typeA, typeB))
self.__dfgExec.connectTo(nodeA+'.'+portA, nodeB+'.'+portB)
if not self.__dfgConnections.has_key(nodeA):
self.__dfgConnections[nodeA] = {}
if not self.__dfgConnections[nodeA].has_key(portA):
self.__dfgConnections[nodeA][portA] = []
self.__dfgConnections[nodeA][portA].append((nodeB, portB))
return True
def connectArg(self, argA, argB, argC):
if self.__dfgArgs.has_key(argA):
self.__dfgExec.connectTo(argA, argB+'.'+argC)
return True
elif self.__dfgArgs.has_key(argC):
self.__dfgExec.connectTo(argA+'.'+argB, argC)
return True
return False
def replaceConnections(self, oldNode, oldPort, newNode, newPort):
prevConnections = []
prevConnections = self.getConnections(oldNode, oldPort)
for c in prevConnections:
if c[0] == newNode:
continue
self.removeConnection(c[0], c[1])
self.connectNodes(newNode, newPort, c[0], c[1])
def removeConnection(self, node, port):
result = False
for nodeName in self.__dfgConnections:
ports = self.__dfgConnections[nodeName]
for portName in ports:
connections = ports[portName]
newConnections = []
for i in range(len(connections)):
if '.'.join(connections[i]) == node+'.'+port:
self.__dfgExec.disconnectFrom(nodeName+'.'+portName, node+'.'+port)
result = True
break
else:
newConnections += [connections[i]]
self.__dfgConnections[nodeName][portName] = newConnections
if result:
return result
return result
def getConnections(self, node, port, targets=True):
result = []
for nodeName in self.__dfgConnections:
ports = self.__dfgConnections[nodeName]
for portName in ports:
connections = ports[portName]
if targets:
if node+'.'+port == nodeName+'.'+portName:
result += connections
else:
continue
else:
for c in connections:
if '.'.join(c) == node+'.'+port:
result += [(nodeName, portName)]
return result
def getNodeMetaData(self, path, key, defaultValue=None, title=None):
lookup = path
if not title is None:
lookup = "%s|%s" % (path, title)
if not self.__dfgNodes.has_key(lookup):
return defaultValue
node = self.__dfgNodes[lookup]
return self.__dfgExec.getNodeMetadata(node, key)
def getNodeMetaDataSI(self, kSceneItem, key, defaultValue=None, title=None):
return self.getNodeMetaData(kSceneItem.getPath(), key, defaultValue=defaultValue, title=title)
def setNodeMetaData(self, path, key, value, title=None):
lookup = path
node = path
if not title is None:
lookup = "%s|%s" % (path, title)
if self.__dfgNodes.has_key(lookup):
node = self.__dfgNodes[lookup]
self.__dfgExec.setNodeMetadata(node, key, str(value))
if key == 'uiComment':
self.__dfgExec.setNodeMetadata(node, 'uiCommentExpanded', 'true')
return True
def setNodeMetaDataSI(self, kSceneItem, key, value, title=None):
return self.setNodeMetaData(kSceneItem.getPath(), key, value, title=title)
def setNodeMetaDataFromDict(self, node, metaData):
for key in metaData:
self.setNodeMetaData(node, key, value, metaData[key])
def computeCurrentPortValue(self, node, port):
client = ks.getCoreClient()
tempPort = self.getOrCreateArgument("temp", portType="Out")
self.connectArg(node, port, tempPort)
errors = json.loads(self.__dfgBinding.getErrors(True))
if errors and len(errors) > 0:
raise Exception(str(errors))
self.__dfgBinding.execute()
value = self.__dfgBinding.getArgValue(tempPort)
self.removeArgument(tempPort)
return value
def computeCurrentPortValueSI(self, kSceneItem):
nodeAndPort = self.getNodeAndPortSI(kSceneItem, asInput=True)
if not nodeAndPort:
return None
(node, port) = nodeAndPort
return self.computeCurrentPortValue(node, port)
def setPortDefaultValue(self, node, port, value):
portPath = "%s.%s" % (node, port)
subExec = self.__dfgExec.getSubExec(node)
dataType = subExec.getExecPortTypeSpec(port)
rtVal = value
if str(type(rtVal)) != '<type \'PyRTValObject\'>':
rtVal = ks.rtVal(dataType, value)
self.__dfgExec.setPortDefaultValue(portPath, rtVal)
return True
def getNodePortResolvedType(self, node, port):
result = self.__dfgExec.getNodePortResolvedType(node+'.'+port)
return result
def getCurrentGroup(self):
return self.__dfgCurrentGroup
def getAllGroupNames(self):
return self.__dfgGroupNames + []
def getNodesInGroup(self, group):
return self.__dfgGroups.get(group, []) + []
def setCurrentGroup(self, group):
if group is None:
self.__dfgCurrentGroup = None
return None
if not self.__dfgGroups.has_key(group):
self.__dfgGroups[group] = []
self.__dfgGroupNames.append(group)
if group != self.__dfgCurrentGroup:
self.__dfgCurrentGroup = group
return self.__dfgGroups[self.__dfgCurrentGroup]
def __addNodeToGroup(self, node):
if(not self.__dfgCurrentGroup):
return
self.__dfgGroups[self.__dfgCurrentGroup].append(node)
def getAllNodeNames(self):
return self.__dfgNodes.values()
def getNodeConnections(self, nodeName):
keys = {}
result = []
node = self.__dfgConnections.get(nodeName, {})
for portName in node:
port = node[portName]
for (otherNode, otherPort) in port:
key = '%s - %s' % (nodeName, otherNode)
if keys.has_key(key):
continue
keys[key] = True
result += [otherNode]
return result
def getAllNodeConnections(self):
keys = {}
result = {}
for nodeName in self.__dfgConnections:
node = self.__dfgConnections[nodeName]
for portName in node:
port = node[portName]
for (otherNode, otherPort) in port:
key = '%s - %s' % (nodeName, otherNode)
if keys.has_key(key):
continue
keys[key] = True
if not result.has_key(nodeName):
result[nodeName] = []
result[nodeName] += [otherNode]
return result
def getNumPorts(self, node):
nodeType = self.__dfgExec.getNodeType(node)
if nodeType == 3: # var
return 1
elif nodeType == 0: # inst
subExec = self.getSubExec(node)
return subExec.getExecPortCount()
return 0
def hasInputConnections(self, node):
for nodeName in self.__dfgConnections:
ports = self.__dfgConnections[nodeName]
for portName in ports:
connections = ports[portName]
for c in connections:
if c[0] == node:
return True
return False
def hasOutputConnections(self, node):
ports = self.__dfgConnections.get(node, {})
for port in ports:
if len(ports) > 0:
return True
return False
def getPortIndex(self, node, port):
nodeType = self.__dfgExec.getNodeType(node)
if nodeType == 3: # var
return 0
elif nodeType == 0: # inst
subExec = self.getSubExec(node)
for i in range(subExec.getExecPortCount()):
portName = subExec.getExecPortName(i)
if portName == port:
return i
return 0
def getMinConnectionPortIndex(self, sourceNode, targetNode):
minIndex = 10000
node = self.__dfgConnections.get(sourceNode, {})
for portName in node:
port = node[portName]
for (otherNode, otherPort) in port:
if not otherNode == targetNode:
continue
index = self.getPortIndex(otherNode, otherPort)
if index < minIndex:
minIndex = index
if minIndex == 10000:
return 0
return minIndex
def getAllNodePortIndices(self):
result = {}
nodes = self.getAllNodeNames()
for n in nodes:
result[n] = {}
nodeType = self.__dfgExec.getNodeType(n)
if nodeType == 3: # var
result[n]['value'] = 0
elif nodeType == 0: # inst
subExec = self.getSubExec(n)
for i in range(subExec.getExecPortCount()):
port = subExec.getExecPortName(i)
result[n][port] = i
return result
def getAllInputConnections(self):
nodes = self.getAllNodeNames()
connections = {}
for n in nodes:
connections[n] = []
def implodeNodesByGroup(self):
for group in self.__dfgGroupNames:
nodes = self.__dfgGroups[group]
implodedName = self.__dfgExec.implodeNodes(group, nodes)
break # todo... right now this doesn't work properly
# todo
# # rename the ports based on their source metadata
# subExec = self.__dfgTopLevelGraph.getSubExec(implodedName)
# for i in range(subExec.getExecPortCount()):
# if subExec.getExecPortType(i) == client.DFG.PortTypes.In:
# continue
# arg = subExec.getExecPortName(i)
# shouldBreak = False
# for j in range(subExec.getNodeCount()):
# if shouldBreak:
# break
# node = subExec.getNodeName(j)
# if subExec.getNodeType(node) > 1:
# continue
# nodeExec = subExec.getSubExec(node)
# for k in range(nodeExec.getExecPortCount()):
# port = nodeExec.getExecPortName(k)
# if subExec.isConnectedTo(node+'.'+port, arg):
# metaData = subExec.getNodeMetadata(node, 'uiComment')
# if not metaData:
# continue
# name = metaData.rpartition('.')[2]
# subExec.renameExecPort(arg, name)
# shouldBreak = True
# break
def saveToFile(self, filePath):
content = self.__dfgBinding.exportJSON()
open(filePath, "w").write(content)
print 'Canvas Builder: Saved file '+str(filePath)
| bsd-3-clause |
timsnyder/bokeh | bokeh/util/tests/test_compiler.py | 1 | 7783 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import io
import json
from mock import patch
# External imports
# Bokeh imports
# Module under test
import bokeh.util.compiler as buc
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_nodejs_compile_coffeescript():
assert buc.nodejs_compile("""(a, b) -> a + b""", "coffeescript", "some.coffee") == \
dict(code="""\
(function (a, b) {
return a + b;
});
""", deps=[])
assert buc.nodejs_compile("""some = require 'some/module'""", "coffeescript", "some.coffee") == \
dict(code="""\
var some;
some = require('some/module');
""", deps=["some/module"])
assert buc.nodejs_compile("""(a, b) -> a + b +""", "coffeescript", "some.coffee") == \
dict(error=dict(
message="unexpected end of input",
text="some.coffee:unexpected end of input"))
assert buc.nodejs_compile("""some = require some/module'""", "coffeescript", "some.coffee") == \
dict(error=dict(
line=1,
column=27,
message="missing '",
text="some.coffee:1:27:missing '",
extract="some = require some/module'",
annotated="some.coffee:1:27:missing '\n some = require some/module'\n ^"))
assert buc.nodejs_compile("""(a, b) -> a + b +""", "coffeescript", "some.coffee") == \
dict(error=dict(
message="unexpected end of input",
text="some.coffee:unexpected end of input"))
assert buc.nodejs_compile("""some = require some/module'""", "coffeescript", "some.coffee") == \
dict(error=dict(
line=1,
column=27,
message="missing '",
text="some.coffee:1:27:missing '",
extract="some = require some/module'",
annotated="some.coffee:1:27:missing '\n some = require some/module'\n ^"))
def test_nodejs_compile_javascript():
    """Compile JavaScript via nodejs: check emitted code, deps, and errors."""
    # Plain function: body is preserved; no dependencies reported.
    assert buc.nodejs_compile("""function f(a, b) { return a + b; };""", "javascript", "some.js") == \
        dict(code="""\
function f(a, b) { return a + b; }
;
""", deps=[])
    # require() calls are detected and reported as dependencies.
    assert buc.nodejs_compile("""var some = require('some/module');""", "javascript", "some.js") == \
        dict(code="""\
var some = require('some/module');
""", deps=["some/module"])
    # Syntax errors surface as a single ANSI-colored (TS-compiler style) string.
    assert buc.nodejs_compile("""function f(a, b) { eturn a + b; };""", "javascript", "some.js") == \
        dict(error=
            '\x1b[96msome.js\x1b[0m:\x1b[93m1\x1b[0m:\x1b[93m26\x1b[0m - '
            "\x1b[91merror\x1b[0m\x1b[90m TS1005: \x1b[0m';' expected.\n"
            '\n'
            '\x1b[7m1\x1b[0m function f(a, b) { eturn a + b; };\n'
            '\x1b[7m \x1b[0m \x1b[91m ~\x1b[0m\n')
def test_nodejs_compile_less():
    """Compile Less via nodejs: check minified output and error reporting."""
    # mix() is evaluated at compile time; output is minified CSS.
    assert buc.nodejs_compile(""".bk-some-style { color: mix(#ff0000, #0000ff, 50%); }""", "less", "some.less") == \
        dict(code=""".bk-some-style{color:#800080}""")
    # A missing '{' yields a positioned parse error with an annotated extract.
    assert buc.nodejs_compile(""".bk-some-style color: green; }""", "less", "some.less") == \
        dict(error=dict(
            line=1,
            column=21,
            message="Unrecognised input",
            text="some.less:1:21:Unrecognised input",
            extract=".bk-some-style color: green; }",
            annotated="some.less:1:21:Unrecognised input\n .bk-some-style color: green; }"))
def test_Implementation():
    """A bare Implementation has no backing file."""
    obj = buc.Implementation()
    assert obj.file is None  # identity check is the idiomatic None comparison (PEP 8)
def test_Inline():
    """Inline stores the given code and an optional originating file name."""
    obj = buc.Inline("code")
    assert obj.code == "code"
    assert obj.file is None  # `is None` per PEP 8 (was `== None`)
    obj = buc.Inline("code", "file")
    assert obj.code == "code"
    assert obj.file == "file"
def test_CoffeeScript():
    """CoffeeScript is an Inline implementation tagged with its language."""
    obj = buc.CoffeeScript("code")
    assert isinstance(obj, buc.Inline)
    assert obj.code == "code"
    assert obj.file is None  # `is None` per PEP 8 (was `== None`)
    assert obj.lang == "coffeescript"
def test_TypeScript():
    """TypeScript is an Inline implementation tagged with its language."""
    obj = buc.TypeScript("code")
    assert isinstance(obj, buc.Inline)
    assert obj.code == "code"
    assert obj.file is None  # `is None` per PEP 8 (was `== None`)
    assert obj.lang == "typescript"
def test_JavaScript():
    """JavaScript is an Inline implementation tagged with its language."""
    obj = buc.JavaScript("code")
    assert isinstance(obj, buc.Inline)
    assert obj.code == "code"
    assert obj.file is None  # `is None` per PEP 8 (was `== None`)
    assert obj.lang == "javascript"
def test_Less():
    """Less is an Inline implementation tagged with its language."""
    obj = buc.Less("code")
    assert isinstance(obj, buc.Inline)
    assert obj.code == "code"
    assert obj.file is None  # `is None` per PEP 8 (was `== None`)
    assert obj.lang == "less"
@patch('io.open')
def test_FromFile(mock_open):
    """FromFile infers the implementation language from the file extension."""
    # io.open is patched out so none of these paths need to exist on disk.
    obj = buc.FromFile("path.coffee")
    assert obj.lang == "coffeescript"
    obj = buc.FromFile("path.ts")
    assert obj.lang == "typescript"
    obj = buc.FromFile("path.js")
    assert obj.lang == "javascript"
    # Both plain CSS and Less sources are routed to the "less" compiler.
    obj = buc.FromFile("path.css")
    assert obj.lang == "less"
    obj = buc.FromFile("path.less")
    assert obj.lang == "less"
def test_exts():
    """The tuple of recognized implementation file extensions is stable."""
    assert buc.exts == (".coffee", ".ts", ".js", ".css", ".less")
def test_jsons():
    """Every bundled bokehjs JSON manifest must use forward-slash module paths."""
    # Renamed loop variable from `file` (shadows the builtin) to `fname`.
    for fname in os.listdir(os.path.join(buc.bokehjs_dir, "js")):
        if fname.endswith('.json'):
            with io.open(os.path.join(buc.bokehjs_dir, "js", fname), encoding="utf-8") as f:
                # Windows-style backslashes in module names would break lookups.
                assert all('\\' not in mod for mod in json.loads(f.read()))
def test_inline_extension():
    """End-to-end: an inline TypeScript custom model compiles during save().

    TestFormatter2 deliberately carries invalid TypeScript and is never
    attached to the plot -- presumably checking that unused models do not
    get compiled (verify against buc's compilation strategy).
    """
    from bokeh.io import save
    from bokeh.models import TickFormatter
    from bokeh.plotting import figure
    from bokeh.util.compiler import TypeScript
    TS_CODE = """
import {TickFormatter} from "models/formatters/tick_formatter"
export class TestFormatter extends TickFormatter {
  doFormat(ticks: number[]): string[] {
    if (ticks.length == 0)
      return[]
    else {
      const formatted = [`${ticks[0]}`]
      for (let i = 1; i < ticks.length; i++) {
        const difference = (ticks[i] - ticks[0]).toPrecision(2)
        formatted.push(`+${difference}}`)
      }
      return formatted
    }
  }
}
"""
    class TestFormatter(TickFormatter):
        __implementation__ = TypeScript(TS_CODE)
    class TestFormatter2(TickFormatter):
        __implementation__ = TypeScript("^") # invalid syntax on purpose
    p = figure()
    p.circle([1, 2, 3, 4, 6], [5, 7, 3, 2, 4])
    p.xaxis.formatter = TestFormatter()
    save(p)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
i-maravic/ns-3 | src/buildings/doc/source/conf.py | 175 | 7083 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions used by this manual.
# NOTE(review): 'sphinx.ext.pngmath' was deprecated in favour of
# 'sphinx.ext.imgmath' (Sphinx 1.8+) -- confirm the Sphinx version in use.
extensions = ['sphinx.ext.pngmath',
              'sphinxcontrib.seqdiag']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'buildings'
# General information about the project.
project = u'LENA'
copyright = u'2011-2012, CTTC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'M2'
# The full version, including alpha/beta/rc tags.
release = 'M2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# No source files are excluded from the build.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# One LaTeX document: (start file, target name, title, author, documentclass).
latex_documents = [
  ('buildings', 'buildings.tex', u'Buildings Module Documentation', u'Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page: (source start file, name, description, authors, section).
man_pages = [
    ('index', 'ns-3-model-library', u'ns-3 Model Library',
     [u'ns-3 project'], 1)
]
| gpl-2.0 |
coronary/RandomEpisode | depends/Lib/site-packages/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
# Keeps pyflakes from flagging the RequestsCookieJar import as unused; the
# name is re-exported from this module for backwards compatibility.
_hush_pyflakes = (RequestsCookieJar,)
# netrc file names probed by get_netrc_auth ('_netrc' is the Windows variant).
NETRC_FILES = ('.netrc', '_netrc')
# Path to the CA bundle shipped with the certs module.
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Return *d* as a sequence of (key, value) pairs.

    Mapping-like objects are converted via items(); anything else is
    assumed to already be an iterable of pairs and passed through.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: len(), a ``len`` attribute, fstat() of the object's
    file descriptor, and finally the size of its in-memory buffer.
    Returns None when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)
    if hasattr(o, 'len'):
        return o.len
    if hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # In-memory file-likes (BytesIO/StringIO) refuse fileno().
            pass
        else:
            return os.fstat(fd).st_size
    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
def get_netrc_auth(url):
    """Return a (login, password) tuple for *url*'s host from the user's
    netrc file, or None (implicit) when no usable entry exists.

    Silently returns None when the netrc module is unavailable, no netrc
    file is found, $HOME cannot be resolved, or the file is unreadable.
    """
    try:
        from netrc import netrc, NetrcParseError
        netrc_path = None
        # Probe each candidate file name and use the first that exists.
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break
        # Abort early if there isn't one.
        if netrc_path is None:
            return
        ri = urlparse(url)
        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]
        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password; field 0 is the login, field 1
                # the account -- prefer login when present.
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass
    # AppEngine hackiness: the netrc import itself can blow up there.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Return the basename of *obj*'s ``name`` attribute when it looks like
    a real path (pseudo-names such as ``<stdin>`` are rejected)."""
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    # Angle brackets mark synthetic names, not filesystem paths.
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Coerce *value* into an OrderedDict.

    Accepts a mapping or an iterable of 2-tuples; None passes through.
    Scalars raise, since they cannot represent key/value pairs.

    ::
        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: mapping, iterable of 2-tuples, or None.
    :raises ValueError: if *value* is a scalar (str/bytes/bool/int).
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    # BUGFIX: collections.Mapping was removed in Python 3.10; import the ABC
    # from collections.abc and fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(value, Mapping):
        value = value.items()
    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header per RFC 2068 Section 2.

    Quoted-strings (which may contain commas) are honoured and their
    quotes removed.  Unlike :func:`parse_set_header`, duplicates and case
    are preserved.  Reverse with :func:`dump_header`.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in _parse_list_header(value):
        # Strip surrounding quotes (and unescape) from quoted-strings only.
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a key=value list header per RFC 2068 Section 2 into a dict.

    Keys without a value map to None; quoted values are unquoted.
    Reverse with :func:`dump_header`.

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    pairs = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            pairs[item] = None
            continue
        key, _, val = item.partition('=')
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        pairs[key] = val
    return pairs
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquote a header value the way browsers do, not strict RFC
    unquoting.  (Reversal of :func:`quote_header_value`.)

    :param value: the header value to unquote.
    :param is_filename: leave UNC paths (\\server\share) untouched.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    # Strip the surrounding quotes.  Strict RFC unquoting would break IE,
    # which uploads files with "C:\foo\bar.txt" as the filename.
    value = value[1:-1]
    # A filename that looks like a UNC path keeps its escapes as-is:
    # collapsing the leading double backslash would corrupt the path.
    if is_filename and value[:2] == '\\\\':
        return value
    return value.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Return a name -> value dict of the cookies in *cj*.

    :param cj: CookieJar object to extract cookies from.
    """
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Merge the key/value pairs of *cookie_dict* into CookieJar *cj*
    and return the (mutated) jar.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Scan *content* for declared charsets and return all matches.

    Probes, in order: <meta charset=...>, <meta content="...charset=...">,
    and the XML prolog encoding attribute.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    probes = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for probe in probes:
        found.extend(probe.findall(content))
    return found
def _parse_content_type_header(header):
    """Split a Content-Type header into (content_type, params_dict).

    Parameter keys are lower-cased; surrounding quotes and spaces are
    stripped from values.  A bare parameter (no '=') maps to True.
    """
    tokens = header.split(';')
    content_type, params = tokens[0].strip(), tokens[1:]
    params_dict = {}
    items_to_strip = "\"' "
    for param in params:
        param = param.strip()
        if param:
            key, value = param, True
            index_of_equals = param.find('=')
            if index_of_equals != -1:
                key = param[:index_of_equals].strip(items_to_strip)
                value = param[index_of_equals + 1:].strip(items_to_strip)
            params_dict[key.lower()] = value
    return content_type, params_dict


def get_encoding_from_headers(headers):
    """Return the encoding declared by *headers*, or None.

    Falls back to RFC 2616's ISO-8859-1 default for text/* content types
    that carry no explicit charset.

    :param headers: dictionary to extract encoding from.
    """
    content_type = headers.get('content-type')
    if not content_type:
        return None
    # BUGFIX/modernization: previously used cgi.parse_header; the cgi module
    # is deprecated (PEP 594) and removed in Python 3.13, so parse locally.
    content_type, params = _parse_content_type_header(content_type)
    if 'charset' in params:
        return params['charset'].strip("'\"")
    if 'text' in content_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Incrementally decode the byte chunks of *iterator* with r.encoding.

    When the response declares no encoding, chunks pass through untouched.
    Decoding errors are replaced rather than raised.
    """
    if r.encoding is None:
        # Nothing to decode with -- hand the raw chunks back unchanged.
        for chunk in iterator:
            yield chunk
        return
    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush any bytes buffered by a partial multi-byte sequence.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Yield consecutive slices of *string*, each at most *slice_length* long."""
    pos = 0
    total = len(string)
    while pos < total:
        yield string[pos:pos + slice_length]
        pos += slice_length
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:
    1. charset from content-type
    2. fall back and replace all unicode characters

    Returns the raw bytes unchanged when no decode attempt is possible.
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)
    tried_encodings = []
    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)
    # Fall back: decode with replacement.  When encoding is None this str()
    # call raises TypeError and the raw bytes are returned as-is.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape percent-escapes of unreserved RFC 3986 characters in *uri*.

    Reserved, illegal and non-ASCII escape sequences are left encoded.
    Raises InvalidURL for a syntactically invalid escape sequence.
    """
    parts = uri.split('%')
    for i in range(1, len(parts)):
        hex_pair = parts[i][0:2]
        # Anything that isn't two alphanumerics can't be a valid escape:
        # restore the '%' and move on.
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            parts[i] = '%' + parts[i]
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            parts[i] = char + parts[i][2:]
        else:
            parts[i] = '%' + parts[i]
    return ''.join(parts)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    # Same set minus '%', so stray percent signs get escaped in the fallback.
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters
        # Then quote only illegal characters (do not quote reserved,
        # unreserved, or '%')
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """Return True when dotted-quad *ip* falls inside CIDR block *net*.

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    host = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_addr, prefix = net.split('/')
    mask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    network = struct.unpack('=L', socket.inet_aton(net_addr))[0] & mask
    return (host & mask) == (network & mask)
def dotted_netmask(mask):
    """Convert a /xx prefix length to dotted-quad notation.

    Example: if mask is 24 function returns 255.255.255.0
    """
    # Clear the low (32 - mask) host bits of an all-ones word.
    host_bits = 32 - mask
    net_bits = (0xffffffff >> host_bits) << host_bits
    return socket.inet_ntoa(struct.pack('>I', net_bits))
def is_ipv4_address(string_ip):
    """Return True if *string_ip* parses as a dotted-quad IPv4 address."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check that *string_network* looks like a.b.c.d/nn
    (used to interpret no_proxy entries)."""
    if string_network.count('/') != 1:
        return False
    address, _, prefix = string_network.partition('/')
    try:
        mask = int(prefix)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    Consults the no_proxy environment variable (exact-suffix host matches
    and CIDR blocks for IP hosts) and the platform's proxy_bypass hook.
    """
    # Environment keys may be lower- or upper-case; prefer the lower-case one.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # IP hosts are matched against CIDR entries only.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True
    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False
    if bypass:
        return True
    return False
def get_environ_proxies(url):
    """Return the environment's proxy configuration, or an empty dict when
    *url* should bypass proxies entirely."""
    if should_bypass_proxies(url):
        return {}
    return getproxies()
def default_user_agent(name="python-requests"):
    """Return the default User-Agent string, e.g.
    'python-requests/<ver> CPython/<ver> <system>/<release>'.

    :param name: product token used as the first component.
    """
    _implementation = platform.python_implementation()
    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        # PyPy's own version differs from the Python level it implements.
        _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
                                                sys.pypy_version_info.minor,
                                                sys.pypy_version_info.micro)
        if sys.pypy_version_info.releaselevel != 'final':
            _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
    elif _implementation == 'Jython':
        _implementation_version = platform.python_version()  # Complete Guess
    elif _implementation == 'IronPython':
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'
    try:
        # platform queries can fail (IOError) on some restricted platforms.
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'
    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Return the default headers sent with every request, as a
    :class:`CaseInsensitiveDict` (UA, compression, wildcard accept,
    keep-alive)."""
    return CaseInsensitiveDict({
        'User-Agent': default_user_agent(),
        'Accept-Encoding': ', '.join(('gzip', 'deflate')),
        'Accept': '*/*',
        'Connection': 'keep-alive',
    })
def parse_header_links(value):
    """Parse an RFC 5988 Link header into a list of dicts.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    links = []
    strip_chars = " '\""
    for chunk in re.split(", *<", value):
        try:
            url, raw_params = chunk.split(";", 1)
        except ValueError:
            # No parameters on this link.
            url, raw_params = chunk, ''
        link = {"url": url.strip("<> '\"")}
        for raw_param in raw_params.split(";"):
            try:
                key, val = raw_param.split("=")
            except ValueError:
                # A parameter without '=' terminates parsing of this link.
                break
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)
    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF flavour of the JSON bytestring *data*.

    JSON always starts with two ASCII characters, so detection is as easy
    as counting the NUL bytes in the first four bytes (and checking for an
    explicit BOM first).

    :param data: bytes holding a JSON document.
    :return: codec name, or None when undetectable.
    """
    sample = data[:4]
    # BUGFIX: was codecs.BOM32_BE, which is a legacy alias for the *UTF-16*
    # big-endian BOM, so BOM-prefixed UTF-32-BE was never detected here.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    '''Return *url* with *new_scheme* prepended when it lacks a scheme of
    its own.  An existing scheme is never replaced.'''
    parts = urlparse(url, new_scheme)
    netloc, path = parts.netloc, parts.path
    if not netloc:
        # urlparse is a finicky beast, and sometimes decides that there isn't
        # a netloc present. Assume it is being over-cautious and treat the
        # path as the netloc.
        netloc, path = path, netloc
    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query, parts.fragment))
def get_auth_from_url(url):
    """Extract the (username, password) pair embedded in *url*, or
    ('', '') when no authentication component is present."""
    parsed = urlparse(url)
    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # username/password are None (or the parse result lacks them).
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Return *string* as the native str type, encoding (on Python 2) or
    decoding (on Python 3) as necessary.  Assumes ASCII unless told
    otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
def urldefragauth(url):
    """
    Return *url* with its fragment and authentication (userinfo) parts
    removed.
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path
    # see func:`prepend_scheme_if_needed` -- urlparse may misfile the netloc.
    if not netloc:
        netloc, path = path, netloc
    # Drop everything up to the last '@' (the user:password component).
    netloc = netloc.rsplit('@', 1)[-1]
    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query, ''))
| mit |
Volvagia356/pixiv-downloader | pixivdl-gui.py | 1 | 8165 | #!/usr/bin/env python
import os,sys
import pickle
import threading
from Queue import Queue
from pixiv_api import Pixiv
import pixivdl
import Tkinter
import ttk
import tkFileDialog
import tkMessageBox
class Application(Tkinter.Frame):
    # Main window frame: wires the input fields, progress UI and background
    # download thread together, and owns the app-wide state machine
    # ("waiting" -> "downloading" -> "cancelling" -> back to "waiting").
    def __init__(self,master=None):
        Tkinter.Frame.__init__(self,master)
        master.title("pixiv Downloader GUI")
        master.resizable(0,0)
        self['padx']=10
        self['pady']=10
        self.pack()
        self.create_widgets()
        # Set by DownloadButton to ask the worker thread to stop.
        self.cancel_download=threading.Event()
        self.event_queue=Queue()
        # Redirect stdout so library prints land in the GUI console.
        sys.stdout=OutputHandler(self,sys.stdout)
        pixivdl.print_welcome()
        self.state="waiting"
    def create_widgets(self):
        # Build the three stacked widget rows.
        self.pack({'side':'top', 'fill':'x', 'expand':1})
        self.target_field=TargetField(self)
        self.directory_field=DirectoryField(self)
        self.progress_indicator=ProgressIndicator(self)
    def download(self):
        # Run the download off the UI thread; daemon so app exit kills it.
        # NOTE(review): download_thread is presumably defined later in this
        # module -- confirm its (target, directory, app) signature.
        thread_args=(self.target_field.field.get(),self.directory_field.field.get(),self)
        thread=threading.Thread(target=download_thread,args=thread_args)
        thread.daemon=True
        thread.start()
    def set_state(self,state):
        # Central enable/disable logic for each application state.
        self.state=state
        if state=="waiting":
            self.target_field.field['state']='normal'
            self.target_field.button['text']="Download"
            self.target_field.button['state']='normal'
            self.directory_field.field['state']='normal'
            self.directory_field.button['state']='normal'
        elif state=="downloading":
            # Button doubles as a Cancel control while a download runs.
            self.target_field.field['state']='disabled'
            self.target_field.button['text']="Cancel"
            self.target_field.button['state']='normal'
            self.directory_field.field['state']='disabled'
            self.directory_field.button['state']='disabled'
        elif state=="cancelling":
            # Everything disabled until the worker notices the cancel event.
            self.target_field.field['state']='disabled'
            self.target_field.button['text']="Cancel"
            self.target_field.button['state']='disabled'
            self.directory_field.field['state']='disabled'
            self.directory_field.button['state']='disabled'
            self.progress_indicator.status['text']="Waiting for current Work to complete..."
class TargetField(Tkinter.Frame):
    """Labelled entry row for the artist name, plus the download button."""
    def __init__(self, master=None):
        Tkinter.Frame.__init__(self, master)
        self.pack(side='top', anchor='w', fill='x', expand=1)
        self.label = Tkinter.Label(self, text="Artist: ")
        self.label.pack(side='left')
        self.field = Tkinter.Entry(self)
        self.field.pack(side='left', fill='x', expand=1)
        # The button needs the Application (master) for state handling.
        self.button = DownloadButton(master, self)
        self.button.pack(side='left')
class DownloadButton(Tkinter.Button):
    # Dual-purpose button: starts a download while the app is waiting,
    # requests cancellation while one is running (label is switched by
    # Application.set_state).
    def __init__(self,app,master=None):
        Tkinter.Button.__init__(self,master)
        self['text']="Download"
        self['command']=self.click
        self.app=app
    def click(self):
        if self.app.state=="waiting":
            # Validate both inputs before spawning the worker thread.
            if not os.path.isdir(self.app.directory_field.field.get()):
                tkMessageBox.showerror("Error","Invalid directory!")
                return 0
            if len(self.app.target_field.field.get())==0:
                tkMessageBox.showerror("Error","Empty artist!")
                return 0
            self.app.download()
            self.app.set_state("downloading")
        elif self.app.state=="downloading":
            # Signal the worker to stop; UI stays locked until it does.
            self.app.cancel_download.set()
            self.app.set_state("cancelling")
class DirectoryField(Tkinter.Frame):
    # Labelled entry row for the output directory plus a "Browse" button
    # that opens the native directory-picker dialog.
    def __init__(self,master=None):
        Tkinter.Frame.__init__(self,master)
        self.pack({'side':'top', 'anchor':'w', 'fill':'x', 'expand':1})
        self.label=Tkinter.Label(self)
        self.label['text']="Directory: "
        self.label.pack({'side':'left'})
        self.field=Tkinter.Entry(self)
        self.field.pack({'side':'left', 'fill':'x', 'expand':1})
        self.button=Tkinter.Button(self)
        self.button['text']="Browse"
        self.button['command']=self.browse
        self.button.pack({'side':'left'})
    def browse(self):
        # Replace the entry's current contents with the chosen directory.
        directory=tkFileDialog.askdirectory()
        self.field.delete(0,Tkinter.END)
        self.field.insert(0,directory)
class ProgressIndicator(Tkinter.Frame):
    # Status label + progress bar + scrolling log console.
    def __init__(self,master=None):
        Tkinter.Frame.__init__(self,master)
        self.pack({'side':'top', 'fill':'both', 'expand':1})
        self.status=ttk.Label(self)
        self.status.pack({'side':'top', 'anchor':'e'})
        self.status['text']="Inactive"
        self.progress_bar=ttk.Progressbar(self)
        self.progress_bar.pack({'side':'top', 'fill':'x', 'expand':1})
        self.console=Console(self)
        self.console['pady']=5
        self.console.pack({'side':'top', 'fill':'both', 'expand':1})
    def update(self,current_work,total_work):
        # NOTE(review): this shadows Tkinter.Frame.update() (the Tk event
        # pump) -- confirm no caller relies on the widget method.
        # current_work is 0-based; display it 1-based.
        self.status['text']="Downloading {} of {}".format(current_work+1,total_work)
        self.progress_bar['max']=total_work
        self.progress_bar['value']=current_work
    def complete(self):
        self.status['text']="Completed"
        self.progress_bar['max']=0
    def console_write(self,data):
        # The Text widget is kept disabled so the user cannot type into
        # it; enable it just long enough to append and auto-scroll.
        console=self.console.textbox
        console['state']='normal'
        console.insert(Tkinter.END,data)
        console.see(Tkinter.END)
        console['state']='disabled'
class Console(Tkinter.Frame):
    # Read-only text box with an attached vertical scrollbar.
    def __init__(self,master=None):
        Tkinter.Frame.__init__(self,master)
        self.textbox=Tkinter.Text(self)
        self.textbox['state']='disabled'
        self.textbox.pack({'side':'left', 'fill':'both', 'expand':1})
        self.scrollbar=Tkinter.Scrollbar(self)
        self.scrollbar.pack({'side':'left', 'fill':'y', 'expand':1})
        # Wire scrollbar and text box to each other.
        self.textbox['yscrollcommand']=self.scrollbar.set
        self.scrollbar['command']=self.textbox.yview
class OutputHandler():
    """File-like stdout proxy: mirrors every write to the real stdout and
    forwards it to the GUI thread through the application's event queue."""
    def __init__(self,app,stdout):
        self.app=app
        self.stdout=stdout
    def write(self,data):
        # 'console' events are rendered by eventloop() on the Tk thread.
        self.app.event_queue.put({'type':'console','data':data})
        self.stdout.write(data)
    def flush(self):
        # Objects installed as sys.stdout must support flush(); without
        # this, any code calling sys.stdout.flush() raises AttributeError.
        self.stdout.flush()
def download_thread(target,directory,app):
try:
app.event_queue.put({'type':'progress', 'data':(0,1)})
app.event_queue.put({'type':'status', 'data':"Getting data..."})
session_id=pixivdl.get_session_config()
p=Pixiv(session_id)
works=p.get_works_all(target)
for i in range(len(works)):
work=works[i]
app.event_queue.put({'type':'progress', 'data':(i,len(works))})
pixivdl.download_work(work,directory)
if app.cancel_download.is_set():
app.cancel_download.clear()
app.event_queue.put({'type':'state', 'data':"waiting"})
app.event_queue.put({'type':'status', 'data':"Download cancelled"})
print "\nCancelled"
return 0
pickle.dump(works,open(directory+"/metadata.pickle",'wb'))
print ''
app.event_queue.put({'type':'function', 'data':app.progress_indicator.complete})
app.event_queue.put({'type':'state', 'data':"waiting"})
except:
app.event_queue.put({'type':'error', 'data':"An unknown error occured!"})
app.event_queue.put({'type':'status', 'data':"Unknown error occured"})
app.event_queue.put({'type':'state', 'data':"waiting"})
def eventloop(app):
    """Drain pending events posted by the worker thread and apply them to
    the GUI, then re-schedule itself on the Tk timer (every 100 ms)."""
    def set_status(text):
        app.progress_indicator.status['text'] = text
    handlers = {
        'console': app.progress_indicator.console_write,
        'error': lambda message: tkMessageBox.showerror("Error", message),
        'progress': lambda args: app.progress_indicator.update(*args),
        'status': set_status,
        'state': app.set_state,
        'function': lambda func: func(),
    }
    while not app.event_queue.empty():
        event = app.event_queue.get()
        handler = handlers.get(event['type'])
        if handler is not None:
            handler(event['data'])
    app.after(100, eventloop, app)
# Application entry point: build the Tk root, start the queue-polling
# event loop, then hand control to Tkinter's mainloop.
# (Also strips the dataset artifact that was fused onto the last line.)
root=Tkinter.Tk()
app=Application(master=root)
eventloop(app)
app.mainloop()
YongseopKim/crosswalk-test-suite | webapi/tct-download-tizen-tests/inst.wgt.py | 294 | 6758 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output line by line, and return
    a ``(exit_code, output_lines)`` tuple."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        # poll() returns None while the process is still running; stop only
        # once the pipe is drained AND the process has exited.
        cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap ``pkgcmd`` invocations so they run as the test user with the
    session-bus environment set; all other commands pass through unchanged."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Query the numeric uid of the test user on the target device and
    return doCMD's ``(exit_code, output)`` tuple."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell id -u %s" % (PARAMETERS.device, PARAMETERS.user)
    else:
        cmd = 'ssh %s "id -u %s"' % (PARAMETERS.device, PARAMETERS.user)
    return doCMD(cmd)
def getPKGID(pkg_name=None):
    """Look up the package id for *pkg_name* in the ``pkgcmd -l`` listing
    on the device; return None if the listing fails or the name is absent."""
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        if line.find("[" + pkg_name + "]") != -1:
            # The pkgid value follows the literal token "pkgid" on the same
            # line and is wrapped in square brackets.
            pkgidIndex = line.split().index("pkgid")
            test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the remote device (via ``sdb shell`` or ``ssh``) and
    return doCMD's ``(exit_code, output)`` tuple."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = 'ssh %s "%s"' % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device (``sdb push`` or ``scp``)
    and sync; return True on success, False on failure."""
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    doRemoteCMD("sync")
    # BUG FIX: the success/failure return value was inverted -- callers
    # (e.g. instPKGs: ``if not doRemoteCopy(...)``) treat a truthy return
    # as "copy succeeded", but the old code returned True on a non-zero
    # (failing) exit code.
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt package shipped with this suite and remove the
    on-device source directory; return True only if everything succeeded."""
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are data, not installable packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
                if not pkg_id:
                    action_status = False
                    continue
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -u -t wgt -q -n %s" % pkg_id)
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Push every .wgt package to the device, install it with pkgcmd, and
    clean up the pushed file; return True only if everything succeeded."""
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        # Media fixtures are data, not installable packages.
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if file.endswith(".wgt"):
                if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
                    action_status = False
                (return_code, output) = doRemoteCMD(
                    "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
                doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
                # pkgcmd reports errors in its output, not its exit code.
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    # Do some special copy/delete... steps
    '''
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s/tests" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
        action_status = False
    '''
    return action_status
def main():
    """Parse command-line options, resolve the target device and the test
    user's session-bus environment, then install or uninstall packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    global SRC_DIR, PKG_SRC_DIR
    SRC_DIR = "/home/%s/content" % PARAMETERS.user
    PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        if not PARAMETERS.device:
            # Auto-detect the first attached sdb device.
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)
    # Resolve the test user's uid so pkgcmd can talk to its session bus.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        # Install is the default action.
        if not instPKGs():
            sys.exit(1)
# Script entry point: main() exits with status 1 on any failure path,
# so reaching the final line means success.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/numpy/linalg/tests/test_regression.py | 50 | 5414 | """ Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
TestCase, run_module_suite, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
rlevel = 1
class TestRegression(TestCase):
    # Each test pins the fix for a historical numpy.linalg bug; the
    # original tracker ticket / GitHub issue numbers are noted inline.
    def test_eig_build(self, level=rlevel):
        # Ticket #652
        rva = array([1.03221168e+02 + 0.j,
                     -1.91843603e+01 + 0.j,
                     -6.04004526e-01 + 15.84422474j,
                     -6.04004526e-01 - 15.84422474j,
                     -1.13692929e+01 + 0.j,
                     -6.57612485e-01 + 10.41755503j,
                     -6.57612485e-01 - 10.41755503j,
                     1.82126812e+01 + 0.j,
                     1.06011014e+01 + 0.j,
                     7.80732773e+00 + 0.j,
                     -7.65390898e-01 + 0.j,
                     1.51971555e-15 + 0.j,
                     -1.51308713e-15 + 0.j])
        a = arange(13 * 13, dtype=float64)
        a.shape = (13, 13)
        a = a % 17
        va, ve = linalg.eig(a)
        # Eigenvalue order is unspecified; compare sorted spectra.
        va.sort()
        rva.sort()
        assert_array_almost_equal(va, rva)
    def test_eigh_build(self, level=rlevel):
        # Ticket 662.
        rvals = [68.60568999, 89.57756725, 106.67185574]
        cov = array([[77.70273908, 3.51489954, 15.64602427],
                     [3.51489954, 88.97013878, -1.07431931],
                     [15.64602427, -1.07431931, 98.18223512]])
        vals, vecs = linalg.eigh(cov)
        assert_array_almost_equal(vals, rvals)
    def test_svd_build(self, level=rlevel):
        # Ticket 627.
        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
        m, n = a.shape
        u, s, vh = linalg.svd(a)
        # The trailing left-singular vectors must be orthogonal to a.
        b = dot(transpose(u[:, n:]), a)
        assert_array_almost_equal(b, np.zeros((2, 2)))
    def test_norm_vector_badarg(self):
        # Regression for #786: Froebenius norm for vectors raises
        # TypeError.
        self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
    def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084, -2.1825367],
                   [-2.1825367, 9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')
        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        # The result must not depend on the input byte order.
        assert_array_equal(ap, bp)
    def test_large_svd_32bit(self):
        # See gh-4442, 64bit would require very large/slow matrices.
        x = np.eye(1000, 66)
        np.linalg.svd(x)
    def test_svd_no_uv(self):
        # gh-4733
        for shape in (3, 4), (4, 4), (4, 3):
            for t in float, complex:
                a = np.ones(shape, dtype=t)
                w = linalg.svd(a, compute_uv=False)
                c = np.count_nonzero(np.absolute(w) > 0.5)
                assert_equal(c, 1)
                assert_equal(np.linalg.matrix_rank(a), 1)
                assert_array_less(1, np.linalg.norm(a, ord=2))
    def test_norm_object_array(self):
        # gh-7575
        testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
        norm = linalg.norm(testvector)
        assert_array_equal(norm, [0, 1])
        self.assertEqual(norm.dtype, np.dtype('float64'))
        norm = linalg.norm(testvector, ord=1)
        assert_array_equal(norm, [0, 1])
        self.assertNotEqual(norm.dtype, np.dtype('float64'))
        norm = linalg.norm(testvector, ord=2)
        assert_array_equal(norm, [0, 1])
        self.assertEqual(norm.dtype, np.dtype('float64'))
        self.assertRaises(ValueError, linalg.norm, testvector, ord='fro')
        self.assertRaises(ValueError, linalg.norm, testvector, ord='nuc')
        self.assertRaises(ValueError, linalg.norm, testvector, ord=np.inf)
        self.assertRaises(ValueError, linalg.norm, testvector, ord=-np.inf)
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            self.assertRaises((AttributeError, DeprecationWarning),
                              linalg.norm, testvector, ord=0)
        self.assertRaises(ValueError, linalg.norm, testvector, ord=-1)
        self.assertRaises(ValueError, linalg.norm, testvector, ord=-2)
        testmatrix = np.array([[np.array([0, 1]), 0, 0],
                               [0, 0, 0]], dtype=object)
        norm = linalg.norm(testmatrix)
        assert_array_equal(norm, [0, 1])
        self.assertEqual(norm.dtype, np.dtype('float64'))
        norm = linalg.norm(testmatrix, ord='fro')
        assert_array_equal(norm, [0, 1])
        self.assertEqual(norm.dtype, np.dtype('float64'))
        self.assertRaises(TypeError, linalg.norm, testmatrix, ord='nuc')
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=np.inf)
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=0)
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=1)
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-1)
        self.assertRaises(TypeError, linalg.norm, testmatrix, ord=2)
        self.assertRaises(TypeError, linalg.norm, testmatrix, ord=-2)
        self.assertRaises(ValueError, linalg.norm, testmatrix, ord=3)
# Allow running this test module directly with numpy's test runner.
if __name__ == '__main__':
    run_module_suite()
| mit |
yury-s/v8-inspector | Source/chrome/tools/gyp/test/hello/gyptest-regyp-output.py | 202 | 1077 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes and
--generator-output is used.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators, and --generator-output is not supported
# by Android and ninja, so we can only test for make.
# Only the make generator supports both regeneration-on-change and
# --generator-output (see the module docstring above).
test = TestGyp.TestGyp(formats=['make'])
CHDIR='generator-output'
test.run_gyp('hello.gyp', '--generator-output=%s' % CHDIR)
test.build('hello.gyp', test.ALL, chdir=CHDIR)
test.run_built_executable('hello', stdout="Hello, world!\n", chdir=CHDIR)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL, chdir=CHDIR)
# The rebuilt binary must reflect the updated gyp file's sources.
test.run_built_executable('hello', stdout="Hello, two!\n", chdir=CHDIR)
test.pass_test()
| bsd-3-clause |
aronsky/home-assistant | homeassistant/components/light/mqtt_template.py | 3 | 14398 | """
Support for MQTT Template lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt_template/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH,
ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_WHITE_VALUE, Light, PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_WHITE_VALUE)
from homeassistant.const import CONF_NAME, CONF_OPTIMISTIC, STATE_ON, STATE_OFF
from homeassistant.components.mqtt import (
CONF_AVAILABILITY_TOPIC, CONF_STATE_TOPIC, CONF_COMMAND_TOPIC,
CONF_PAYLOAD_AVAILABLE, CONF_PAYLOAD_NOT_AVAILABLE, CONF_QOS, CONF_RETAIN,
MqttAvailability)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from homeassistant.helpers.restore_state import async_get_last_state
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt_template'
DEPENDENCIES = ['mqtt']
DEFAULT_NAME = 'MQTT Template Light'
DEFAULT_OPTIMISTIC = False
CONF_BLUE_TEMPLATE = 'blue_template'
CONF_BRIGHTNESS_TEMPLATE = 'brightness_template'
CONF_COLOR_TEMP_TEMPLATE = 'color_temp_template'
CONF_COMMAND_OFF_TEMPLATE = 'command_off_template'
CONF_COMMAND_ON_TEMPLATE = 'command_on_template'
CONF_EFFECT_LIST = 'effect_list'
CONF_EFFECT_TEMPLATE = 'effect_template'
CONF_GREEN_TEMPLATE = 'green_template'
CONF_RED_TEMPLATE = 'red_template'
CONF_STATE_TEMPLATE = 'state_template'
CONF_WHITE_VALUE_TEMPLATE = 'white_value_template'
# Voluptuous configuration schema: the on/off command templates and the
# command topic are required; every feature template is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
    vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
    vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
    vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
    vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
    vol.Optional(CONF_RED_TEMPLATE): cv.template,
    vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
    vol.Optional(CONF_STATE_TEMPLATE): cv.template,
    vol.Optional(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
    vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
    vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
    vol.Optional(CONF_QOS, default=mqtt.DEFAULT_QOS):
        vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up a MQTT Template light."""
    # Discovery payloads arrive unvalidated; re-run them through the schema.
    if discovery_info is not None:
        config = PLATFORM_SCHEMA(discovery_info)
    async_add_entities([MqttTemplate(
        hass,
        config.get(CONF_NAME),
        config.get(CONF_EFFECT_LIST),
        # Topic map (state/command) keyed by the CONF_* constants.
        {
            key: config.get(key) for key in (
                CONF_STATE_TOPIC,
                CONF_COMMAND_TOPIC
            )
        },
        # Template map; missing templates stay as None and disable the
        # corresponding feature on the entity.
        {
            key: config.get(key) for key in (
                CONF_BLUE_TEMPLATE,
                CONF_BRIGHTNESS_TEMPLATE,
                CONF_COLOR_TEMP_TEMPLATE,
                CONF_COMMAND_OFF_TEMPLATE,
                CONF_COMMAND_ON_TEMPLATE,
                CONF_EFFECT_TEMPLATE,
                CONF_GREEN_TEMPLATE,
                CONF_RED_TEMPLATE,
                CONF_STATE_TEMPLATE,
                CONF_WHITE_VALUE_TEMPLATE,
            )
        },
        config.get(CONF_OPTIMISTIC),
        config.get(CONF_QOS),
        config.get(CONF_RETAIN),
        config.get(CONF_AVAILABILITY_TOPIC),
        config.get(CONF_PAYLOAD_AVAILABLE),
        config.get(CONF_PAYLOAD_NOT_AVAILABLE),
    )])
class MqttTemplate(MqttAvailability, Light):
    """Representation of a MQTT Template light."""
    def __init__(self, hass, name, effect_list, topics, templates, optimistic,
                 qos, retain, availability_topic, payload_available,
                 payload_not_available):
        """Initialize a MQTT Template light."""
        super().__init__(availability_topic, qos, payload_available,
                         payload_not_available)
        self._name = name
        self._effect_list = effect_list
        self._topics = topics
        self._templates = templates
        # Optimistic mode is forced when no state topic/template is
        # configured, since the real state can never be read back.
        self._optimistic = optimistic or topics[CONF_STATE_TOPIC] is None \
            or templates[CONF_STATE_TEMPLATE] is None
        self._qos = qos
        self._retain = retain
        # features
        # None means "feature unsupported"; a numeric placeholder means
        # the corresponding template was configured.
        self._state = False
        if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
            self._brightness = 255
        else:
            self._brightness = None
        if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
            self._color_temp = 255
        else:
            self._color_temp = None
        if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
            self._white_value = 255
        else:
            self._white_value = None
        # RGB color support requires all three channel templates.
        if (self._templates[CONF_RED_TEMPLATE] is not None and
                self._templates[CONF_GREEN_TEMPLATE] is not None and
                self._templates[CONF_BLUE_TEMPLATE] is not None):
            self._hs = [0, 0]
        else:
            self._hs = None
        self._effect = None
        for tpl in self._templates.values():
            if tpl is not None:
                tpl.hass = hass
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        last_state = await async_get_last_state(self.hass, self.entity_id)
        @callback
        def state_received(topic, payload, qos):
            """Handle new MQTT messages."""
            state = self._templates[CONF_STATE_TEMPLATE].\
                async_render_with_possible_json_value(payload)
            if state == STATE_ON:
                self._state = True
            elif state == STATE_OFF:
                self._state = False
            else:
                _LOGGER.warning("Invalid state value received")
            # Each feature attribute is only parsed when its template was
            # configured (attribute is not None); bad values keep the
            # previous state and only log a warning.
            if self._brightness is not None:
                try:
                    self._brightness = int(
                        self._templates[CONF_BRIGHTNESS_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid brightness value received")
            if self._color_temp is not None:
                try:
                    self._color_temp = int(
                        self._templates[CONF_COLOR_TEMP_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning("Invalid color temperature value received")
            if self._hs is not None:
                try:
                    red = int(
                        self._templates[CONF_RED_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    green = int(
                        self._templates[CONF_GREEN_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    blue = int(
                        self._templates[CONF_BLUE_TEMPLATE].
                        async_render_with_possible_json_value(payload))
                    self._hs = color_util.color_RGB_to_hs(red, green, blue)
                except ValueError:
                    _LOGGER.warning("Invalid color value received")
            if self._white_value is not None:
                try:
                    self._white_value = int(
                        self._templates[CONF_WHITE_VALUE_TEMPLATE].
                        async_render_with_possible_json_value(payload)
                    )
                except ValueError:
                    _LOGGER.warning('Invalid white value received')
            if self._templates[CONF_EFFECT_TEMPLATE] is not None:
                effect = self._templates[CONF_EFFECT_TEMPLATE].\
                    async_render_with_possible_json_value(payload)
                if effect in self._effect_list:
                    self._effect = effect
                else:
                    _LOGGER.warning("Unsupported effect value received")
            self.async_schedule_update_ha_state()
        if self._topics[CONF_STATE_TOPIC] is not None:
            await mqtt.async_subscribe(
                self.hass, self._topics[CONF_STATE_TOPIC], state_received,
                self._qos)
        # In optimistic mode, restore the last recorded state instead of
        # waiting for a retained message that may never come.
        if self._optimistic and last_state:
            self._state = last_state.state == STATE_ON
            if last_state.attributes.get(ATTR_BRIGHTNESS):
                self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
            if last_state.attributes.get(ATTR_HS_COLOR):
                self._hs = last_state.attributes.get(ATTR_HS_COLOR)
            if last_state.attributes.get(ATTR_COLOR_TEMP):
                self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
            if last_state.attributes.get(ATTR_EFFECT):
                self._effect = last_state.attributes.get(ATTR_EFFECT)
            if last_state.attributes.get(ATTR_WHITE_VALUE):
                self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def color_temp(self):
        """Return the color temperature in mired."""
        return self._color_temp
    @property
    def hs_color(self):
        """Return the hs color value [int, int]."""
        return self._hs
    @property
    def white_value(self):
        """Return the white property."""
        return self._white_value
    @property
    def should_poll(self):
        """Return True if entity has to be polled for state.
        False if entity pushes its state to HA.
        """
        return False
    @property
    def name(self):
        """Return the name of the entity."""
        return self._name
    @property
    def is_on(self):
        """Return True if entity is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return True if unable to access real state of the entity."""
        return self._optimistic
    @property
    def effect_list(self):
        """Return the list of supported effects."""
        return self._effect_list
    @property
    def effect(self):
        """Return the current effect."""
        return self._effect
    async def async_turn_on(self, **kwargs):
        """Turn the entity on.
        This method is a coroutine.
        """
        # ``values`` becomes the variable set handed to the command_on
        # template's renderer.
        values = {'state': True}
        if self._optimistic:
            self._state = True
        if ATTR_BRIGHTNESS in kwargs:
            values['brightness'] = int(kwargs[ATTR_BRIGHTNESS])
            if self._optimistic:
                self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_COLOR_TEMP in kwargs:
            values['color_temp'] = int(kwargs[ATTR_COLOR_TEMP])
            if self._optimistic:
                self._color_temp = kwargs[ATTR_COLOR_TEMP]
        if ATTR_HS_COLOR in kwargs:
            hs_color = kwargs[ATTR_HS_COLOR]
            # If there's a brightness topic set, we don't want to scale the RGB
            # values given using the brightness.
            if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
                brightness = 255
            else:
                brightness = kwargs.get(
                    ATTR_BRIGHTNESS, self._brightness if self._brightness else
                    255)
            rgb = color_util.color_hsv_to_RGB(
                hs_color[0], hs_color[1], brightness / 255 * 100)
            values['red'] = rgb[0]
            values['green'] = rgb[1]
            values['blue'] = rgb[2]
            if self._optimistic:
                self._hs = kwargs[ATTR_HS_COLOR]
        if ATTR_WHITE_VALUE in kwargs:
            values['white_value'] = int(kwargs[ATTR_WHITE_VALUE])
            if self._optimistic:
                self._white_value = kwargs[ATTR_WHITE_VALUE]
        if ATTR_EFFECT in kwargs:
            values['effect'] = kwargs.get(ATTR_EFFECT)
        if ATTR_FLASH in kwargs:
            values['flash'] = kwargs.get(ATTR_FLASH)
        if ATTR_TRANSITION in kwargs:
            values['transition'] = int(kwargs[ATTR_TRANSITION])
        mqtt.async_publish(
            self.hass, self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(**values),
            self._qos, self._retain
        )
        if self._optimistic:
            self.async_schedule_update_ha_state()
    async def async_turn_off(self, **kwargs):
        """Turn the entity off.
        This method is a coroutine.
        """
        values = {'state': False}
        if self._optimistic:
            self._state = False
        if ATTR_TRANSITION in kwargs:
            values['transition'] = int(kwargs[ATTR_TRANSITION])
        mqtt.async_publish(
            self.hass, self._topics[CONF_COMMAND_TOPIC],
            self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(**values),
            self._qos, self._retain
        )
        if self._optimistic:
            self.async_schedule_update_ha_state()
    @property
    def supported_features(self):
        """Flag supported features."""
        # Feature bits mirror which template-backed attributes are enabled.
        features = (SUPPORT_FLASH | SUPPORT_TRANSITION)
        if self._brightness is not None:
            features = features | SUPPORT_BRIGHTNESS
        if self._hs is not None:
            features = features | SUPPORT_COLOR
        if self._effect_list is not None:
            features = features | SUPPORT_EFFECT
        if self._color_temp is not None:
            features = features | SUPPORT_COLOR_TEMP
        if self._white_value is not None:
            features = features | SUPPORT_WHITE_VALUE
        return features
| apache-2.0 |
class Getch:
    """
    Gets a single character from standard input. Does not echo to
    the screen.
    """
    def __init__(self):
        # Probe the platform backends in order; each backend's __init__
        # imports platform-specific modules, so an unsupported platform
        # raises ImportError (or AttributeError for Carbon) and we fall
        # through to the next candidate.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            try:
                self.impl = _GetchMacCarbon()
            except(AttributeError, ImportError):
                self.impl = _GetchUnix()
    def __call__(self): return self.impl()
class _GetchUnix:
    # POSIX backend: switch the terminal to raw mode, read one byte,
    # then restore the previous settings.
    def __init__(self):
        # These imports exist only to raise ImportError during backend
        # probing on platforms without tty support.
        import tty
        import sys
    def __call__(self):
        import sys
        import tty
        import termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each keypress immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read failed.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    # Windows backend built on msvcrt's console API.
    def __init__(self):
        # Import probe: raises ImportError on non-Windows platforms.
        import msvcrt
    def __call__(self):
        import msvcrt
        return msvcrt.getch()
class _GetchMacCarbon:
    """
    A function which returns the current ASCII key that is down;
    if no ASCII key is down, the null string is returned. The
    page http://www.mactech.com/macintosh-c/chap02-1.html was
    very helpful in figuring out how to do this.
    """
    def __init__(self):
        import Carbon
        Carbon.Evt  # see if it has this (in Unix, it doesn't)
    def __call__(self):
        import Carbon
        if Carbon.Evt.EventAvail(0x0008)[0] == 0:  # 0x0008 is the keyDownMask
            return ''
        else:
            #
            # The event contains the following info:
            # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
            #
            # The message (msg) contains the ASCII char which is
            # extracted with the 0x000000FF charCodeMask; this
            # number is converted to an ASCII character with chr() and
            # returned
            #
            (what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
            return chr(msg)
mquandalle/rethinkdb | external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-restat-importlib.py | 218 | 1219 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure we don't cause unnecessary builds due to import libs appearing
to be out of date.
"""
import TestGyp
import sys
import time
# Import libraries only exist on Windows; the test is a no-op elsewhere.
if sys.platform == 'win32':
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'importlib'
    test.run_gyp('importlib.gyp', chdir=CHDIR)
    test.build('importlib.gyp', test.ALL, chdir=CHDIR)
    # Delay briefly so that there's time for this touch not to have the
    # timestamp as the previous run.
    test.sleep()
    # Touch the .cc file; the .dll will rebuild, but the import libs timestamp
    # won't be updated.
    test.touch('importlib/has-exports.cc')
    test.build('importlib.gyp', 'test_importlib', chdir=CHDIR)
    # This is the important part. The .dll above will relink and have an updated
    # timestamp, however the import .libs timestamp won't be updated. So, we
    # have to handle restating inputs in ninja so the final binary doesn't
    # continually relink (due to thinking the .lib isn't up to date).
    test.up_to_date('importlib.gyp', test.ALL, chdir=CHDIR)
    test.pass_test()
| agpl-3.0 |
mikebrevard/UnixAdministration | deliverables/Extra Credit/etc/data/genData/venv/lib/python3.4/site-packages/pip/_vendor/distlib/markers.py | 1261 | 6282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Parser for the environment markers micro-language defined in PEP 345."""
import ast
import os
import sys
import platform
from .compat import python_implementation, string_types
from .util import in_venv
__all__ = ['interpret']
class Evaluator(object):
"""
A limited evaluator for Python expressions.
"""
operators = {
'eq': lambda x, y: x == y,
'gt': lambda x, y: x > y,
'gte': lambda x, y: x >= y,
'in': lambda x, y: x in y,
'lt': lambda x, y: x < y,
'lte': lambda x, y: x <= y,
'not': lambda x: not x,
'noteq': lambda x, y: x != y,
'notin': lambda x, y: x not in y,
}
allowed_values = {
'sys_platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os_name': os.name,
'platform_in_venv': str(in_venv()),
'platform_release': platform.release(),
'platform_version': platform.version(),
'platform_machine': platform.machine(),
'platform_python_implementation': python_implementation(),
}
def __init__(self, context=None):
"""
Initialise an instance.
:param context: If specified, names are looked up in this mapping.
"""
self.context = context or {}
self.source = None
def get_fragment(self, offset):
"""
Get the part of the source which is causing a problem.
"""
fragment_len = 10
s = '%r' % (self.source[offset:offset + fragment_len])
if offset + fragment_len < len(self.source):
s += '...'
return s
def get_handler(self, node_type):
"""
Get a handler for the specified AST node type.
"""
return getattr(self, 'do_%s' % node_type, None)
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node)
def get_attr_key(self, node):
assert isinstance(node, ast.Attribute), 'attribute node expected'
return '%s.%s' % (node.value.id, node.attr)
def do_attribute(self, node):
if not isinstance(node.value, ast.Name):
valid = False
else:
key = self.get_attr_key(node)
valid = key in self.context or key in self.allowed_values
if not valid:
raise SyntaxError('invalid expression: %s' % key)
if key in self.context:
result = self.context[key]
else:
result = self.allowed_values[key]
return result
def do_boolop(self, node):
result = self.evaluate(node.values[0])
is_or = node.op.__class__ is ast.Or
is_and = node.op.__class__ is ast.And
assert is_or or is_and
if (is_and and result) or (is_or and not result):
for n in node.values[1:]:
result = self.evaluate(n)
if (is_or and result) or (is_and and not result):
break
return result
    def do_compare(self, node):
        """Evaluate a (possibly chained) comparison such as ``a < b <= c``.

        Operator names are looked up in ``self.operators``; chained
        comparisons short-circuit on the first false link, mirroring
        Python's own semantics.
        """
        def sanity_check(lhsnode, rhsnode):
            # Comparing two string literals to each other is always a
            # constant and therefore almost certainly a marker-writing
            # mistake -- reject it with a quoted source fragment.
            valid = True
            if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
                valid = False
            #elif (isinstance(lhsnode, ast.Attribute)
            #      and isinstance(rhsnode, ast.Attribute)):
            #    klhs = self.get_attr_key(lhsnode)
            #    krhs = self.get_attr_key(rhsnode)
            #    valid = klhs != krhs
            if not valid:
                s = self.get_fragment(node.col_offset)
                raise SyntaxError('Invalid comparison: %s' % s)
        lhsnode = node.left
        lhs = self.evaluate(lhsnode)
        result = True
        for op, rhsnode in zip(node.ops, node.comparators):
            sanity_check(lhsnode, rhsnode)
            # Dispatch on the lower-cased AST operator class name (e.g. "lt").
            op = op.__class__.__name__.lower()
            if op not in self.operators:
                raise SyntaxError('unsupported operation: %r' % op)
            rhs = self.evaluate(rhsnode)
            result = self.operators[op](lhs, rhs)
            if not result:
                # Short-circuit: the whole chain is already false.
                break
            # Slide the window for the next link of the chain.
            lhs = rhs
            lhsnode = rhsnode
        return result
def do_expression(self, node):
return self.evaluate(node.body)
def do_name(self, node):
valid = False
if node.id in self.context:
valid = True
result = self.context[node.id]
elif node.id in self.allowed_values:
valid = True
result = self.allowed_values[node.id]
if not valid:
raise SyntaxError('invalid expression: %s' % node.id)
return result
    def do_str(self, node):
        """Return the literal value of a string node."""
        return node.s
def interpret(marker, execution_context=None):
    """
    Interpret a marker and return a result depending on environment.
    :param marker: The marker to interpret.
    :type marker: str
    :param execution_context: The context used for name lookup.
    :type execution_context: mapping
    """
    # Convenience wrapper: a fresh Evaluator per call, so no parse state
    # (self.source) leaks between marker evaluations.
    return Evaluator(execution_context).evaluate(marker.strip())
| mit |
nicproulx/mne-python | mne/realtime/client.py | 3 | 10948 | # Authors: Christoph Dinh <chdinh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from __future__ import print_function
import socket
import time
from ..externals.six.moves import StringIO
import threading
import numpy as np
from ..utils import logger, verbose
from ..io.constants import FIFF
from ..io.meas_info import read_meas_info
from ..io.tag import Tag, read_tag
from ..io.tree import make_dir_tree
# Constants for fiff realtime fiff messages
# Command codes understood by mne_rt_server; sent over the data connection
# as FIFF_MNE_RT_COMMAND tags (see RtClient._send_fiff_command).
MNE_RT_GET_CLIENT_ID = 1
MNE_RT_SET_CLIENT_ALIAS = 2
def _recv_tag_raw(sock):
    """Read a tag and the associated data from a socket.

    Parameters
    ----------
    sock : socket.socket
        The socket from which to read the tag.

    Returns
    -------
    tag : instance of Tag
        The tag.
    buff : str
        The raw data of the tag (including header).
    """
    # The fiff tag header is four big-endian int32 values: kind, type,
    # size, next.  A single recv() is allowed to return fewer bytes than
    # requested on a TCP stream, so loop until the full header is in
    # (the original code raised on any legal short read).
    header_size = 4 * 4
    s = sock.recv(header_size)
    while len(s) < header_size:
        chunk = sock.recv(header_size - len(s))
        if not chunk:
            # Peer closed the connection before a full header arrived.
            raise RuntimeError('Not enough bytes received, something is '
                               'wrong. Make sure the mne_rt_server is '
                               'running.')
        s += chunk
    # NOTE: np.fromstring is deprecated in modern numpy (np.frombuffer is
    # the replacement); kept here to match this file's py2-era conventions.
    tag = Tag(*np.fromstring(s, '>i4'))
    n_received = 0
    rec_buff = [s]
    while n_received < tag.size:
        n_buffer = min(4096, tag.size - n_received)
        this_buffer = sock.recv(n_buffer)
        if not this_buffer:
            # Connection closed mid-payload: fall through to the size
            # check below instead of looping forever on empty reads.
            break
        rec_buff.append(this_buffer)
        n_received += len(this_buffer)
    if n_received != tag.size:
        raise RuntimeError('Not enough bytes received, something is wrong. '
                           'Make sure the mne_rt_server is running.')
    buff = ''.join(rec_buff)
    return tag, buff
def _buffer_recv_worker(rt_client, nchan):
    """Worker thread that constantly receives buffers."""
    try:
        buffer_iter = rt_client.raw_buffers(nchan)
        for raw_buffer in buffer_iter:
            rt_client._push_raw_buffer(raw_buffer)
    except RuntimeError as err:
        # The server went away (or an equivalent failure occurred): clear
        # the thread reference so a fresh receive thread can be started.
        rt_client._recv_thread = None
        print('Buffer receive thread stopped: %s' % err)
class RtClient(object):
    """Realtime Client.
    Client to communicate with mne_rt_server
    Parameters
    ----------
    host : str
        Hostname (or IP address) of the host where mne_rt_server is running.
    cmd_port : int
        Port to use for the command connection.
    data_port : int
        Port to use for the data connection.
    timeout : float
        Communication timeout in seconds.
    verbose : bool, str, int, or None
        Log verbosity (see :func:`mne.verbose` and
        :ref:`Logging documentation <tut_logging>` for more).
    """
    @verbose
    def __init__(self, host, cmd_port=4217, data_port=4218, timeout=1.0,
                 verbose=None):  # noqa: D102
        self._host = host
        self._data_port = data_port
        self._cmd_port = cmd_port
        self._timeout = timeout
        try:
            self._cmd_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._cmd_sock.settimeout(timeout)
            self._cmd_sock.connect((host, cmd_port))
            # Command socket is polled non-blocking by _send_command().
            self._cmd_sock.setblocking(0)
        except Exception:
            raise RuntimeError('Setting up command connection (host: %s '
                               'port: %d) failed. Make sure mne_rt_server '
                               'is running. ' % (host, cmd_port))
        try:
            self._data_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._data_sock.settimeout(timeout)
            self._data_sock.connect((host, data_port))
            # Data socket stays blocking: fiff tags are read synchronously.
            self._data_sock.setblocking(1)
        except Exception:
            raise RuntimeError('Setting up data connection (host: %s '
                               'port: %d) failed. Make sure mne_rt_server '
                               'is running.' % (host, data_port))
        self.verbose = verbose
        # get my client ID
        self._client_id = self.get_client_id()
        self._recv_thread = None
        self._recv_callbacks = list()
    def _send_command(self, command):
        """Send a command to the server.
        Parameters
        ----------
        command : str
            The command to send.
        Returns
        -------
        resp : str
            The response from the server.
        """
        logger.debug('Sending command: %s' % command)
        command += '\n'
        self._cmd_sock.sendall(command.encode('utf-8'))
        # Poll the non-blocking command socket: wait up to 2x timeout for
        # the first data, then keep reading until timeout sec of silence.
        buf, chunk, begin = [], '', time.time()
        while True:
            # if we got some data, then break after wait sec
            if buf and time.time() - begin > self._timeout:
                break
            # if we got no data at all, wait a little longer
            elif time.time() - begin > self._timeout * 2:
                break
            try:
                chunk = self._cmd_sock.recv(8192)
                if chunk:
                    buf.append(chunk)
                    begin = time.time()
                else:
                    time.sleep(0.1)
            except:
                # NOTE(review): bare except swallows everything, including
                # KeyboardInterrupt.  recv() on the non-blocking socket
                # raises when no data is pending (the expected case here);
                # this should be narrowed to socket.error.
                pass
        return ''.join(buf)
    def _send_fiff_command(self, command, data=None):
        """Send a command through the data connection as a fiff tag.
        Parameters
        ----------
        command : int
            The command code.
        data : str
            Additional data to send.
        """
        # Hand-assemble a fiff tag: header of four big-endian int32
        # (kind, type, size, next) followed by the command code and data.
        kind = FIFF.FIFF_MNE_RT_COMMAND
        type = FIFF.FIFFT_VOID
        size = 4
        if data is not None:
            size += len(data)  # first 4 bytes are the command code
        next = 0
        msg = np.array(kind, dtype='>i4').tostring()
        msg += np.array(type, dtype='>i4').tostring()
        msg += np.array(size, dtype='>i4').tostring()
        msg += np.array(next, dtype='>i4').tostring()
        msg += np.array(command, dtype='>i4').tostring()
        if data is not None:
            msg += np.array(data, dtype='>c').tostring()
        self._data_sock.sendall(msg)
    def get_measurement_info(self):
        """Get the measurement information.
        Returns
        -------
        info : dict
            The measurement information.
        """
        cmd = 'measinfo %d' % self._client_id
        self._send_command(cmd)
        # Collect raw tags until the end of the measurement-info block,
        # then re-parse the accumulated bytes as an in-memory fiff file.
        buff = []
        directory = []
        pos = 0
        while True:
            tag, this_buff = _recv_tag_raw(self._data_sock)
            tag.pos = pos
            pos += 16 + tag.size  # 16-byte header plus payload
            directory.append(tag)
            buff.append(this_buff)
            if tag.kind == FIFF.FIFF_BLOCK_END and tag.type == FIFF.FIFFT_INT:
                val = np.fromstring(this_buff[-4:], dtype=">i4")
                if val == FIFF.FIFFB_MEAS_INFO:
                    break
        buff = ''.join(buff)
        fid = StringIO(buff)
        tree, _ = make_dir_tree(fid, directory)
        info, meas = read_meas_info(fid, tree)
        return info
    def set_client_alias(self, alias):
        """Set client alias.
        Parameters
        ----------
        alias : str
            The client alias.
        """
        self._send_fiff_command(MNE_RT_SET_CLIENT_ALIAS, alias)
    def get_client_id(self):
        """Get the client ID.
        Returns
        -------
        id : int
            The client ID.
        """
        self._send_fiff_command(MNE_RT_GET_CLIENT_ID)
        # ID is sent as answer
        tag, buff = _recv_tag_raw(self._data_sock)
        if (tag.kind == FIFF.FIFF_MNE_RT_CLIENT_ID and
                tag.type == FIFF.FIFFT_INT):
            # The int32 payload is the last 4 bytes of the raw tag.
            client_id = int(np.fromstring(buff[-4:], dtype=">i4"))
        else:
            raise RuntimeError('wrong tag received')
        return client_id
    def start_measurement(self):
        """Start the measurement."""
        cmd = 'start %d' % self._client_id
        self._send_command(cmd)
    def stop_measurement(self):
        """Stop the measurement."""
        self._send_command('stop-all')
    def start_receive_thread(self, nchan):
        """Start the receive thread.
        If the measurement has not been started, it will also be started.
        Parameters
        ----------
        nchan : int
            The number of channels in the data.
        """
        if self._recv_thread is None:
            self.start_measurement()
            self._recv_thread = threading.Thread(target=_buffer_recv_worker,
                                                 args=(self, nchan))
            self._recv_thread.start()
    def stop_receive_thread(self, stop_measurement=False):
        """Stop the receive thread.
        Parameters
        ----------
        stop_measurement : bool
            Also stop the measurement.
        """
        if self._recv_thread is not None:
            # NOTE(review): threading.Thread has no stop() method -- this
            # line raises AttributeError if ever reached.  The worker
            # actually exits when the server connection fails; verify the
            # intended shutdown mechanism.
            self._recv_thread.stop()
            self._recv_thread = None
        if stop_measurement:
            self.stop_measurement()
    def register_receive_callback(self, callback):
        """Register a raw buffer receive callback.
        Parameters
        ----------
        callback : callable
            The callback. The raw buffer is passed as the first parameter
            to callback.
        """
        if callback not in self._recv_callbacks:
            self._recv_callbacks.append(callback)
    def unregister_receive_callback(self, callback):
        """Unregister a raw buffer receive callback.
        Parameters
        ----------
        callback : function
            The callback to unregister.
        """
        if callback in self._recv_callbacks:
            self._recv_callbacks.remove(callback)
    def _push_raw_buffer(self, raw_buffer):
        """Push raw buffer to clients using callbacks."""
        for callback in self._recv_callbacks:
            callback(raw_buffer)
    def read_raw_buffer(self, nchan):
        """Read a single buffer with raw data.
        Parameters
        ----------
        nchan : int
            The number of channels (info['nchan']).
        Returns
        -------
        raw_buffer : float array, shape=(nchan, n_times)
            The raw data.
        """
        tag, this_buff = _recv_tag_raw(self._data_sock)
        # skip tags until we get a data buffer
        while tag.kind != FIFF.FIFF_DATA_BUFFER:
            tag, this_buff = _recv_tag_raw(self._data_sock)
        # Re-parse the raw bytes with the regular fiff tag reader to get
        # decoded numerical data, then reshape to (nchan, n_times).
        buff = StringIO(this_buff)
        tag = read_tag(buff)
        raw_buffer = tag.data.reshape(-1, nchan).T
        return raw_buffer
    def raw_buffers(self, nchan):
        """Return an iterator over raw buffers.
        Parameters
        ----------
        nchan : int
            The number of channels (info['nchan']).
        Returns
        -------
        raw_buffer : generator
            Generator for iteration over raw buffers.
        """
        # NOTE(review): read_raw_buffer never returns None as written, so
        # this generator only terminates via an exception in the worker.
        while True:
            raw_buffer = self.read_raw_buffer(nchan)
            if raw_buffer is not None:
                yield raw_buffer
            else:
                break
| bsd-3-clause |
savi-dev/quantum | quantum/plugins/cisco/nova/quantum_port_aware_scheduler.py | 1 | 4417 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
#
"""
Quantum Port Aware Scheduler Implementation
"""
from nova import exception as excp
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.scheduler import chance
from quantumclient import Client
LOG = logging.getLogger(__name__)
# Configuration options for reaching the Quantum (Neutron's predecessor)
# API service; registered with nova's global FLAGS below.
quantum_opts = [
    cfg.StrOpt('quantum_connection_host',
               default='127.0.0.1',
               help='HOST for connecting to quantum'),
    cfg.StrOpt('quantum_connection_port',
               default='9696',
               help='PORT for connecting to quantum'),
    cfg.StrOpt('quantum_default_tenant_id',
               default="default",
               help='Default tenant id when creating quantum networks'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(quantum_opts)
# Module-level connection constants resolved once at import time from the
# registered flags.
HOST = FLAGS.quantum_connection_host
PORT = FLAGS.quantum_connection_port
USE_SSL = False
VERSION = '1.0'
# URI prefix for the Cisco extension API; {tenant_id} is filled in by the
# quantum client.
URI_PREFIX_CSCO = '/extensions/csco/tenants/{tenant_id}'
TENANT_ID = 'nova'
# Name of the Quantum plugin extension this scheduler depends on.
CSCO_EXT_NAME = 'Cisco Nova Tenant'
ACTION = '/schedule_host'
class QuantumPortAwareScheduler(chance.ChanceScheduler):
    """
    Quantum network service dependent scheduler
    Obtains the hostname from Quantum using an extension API
    """
    def __init__(self):
        # We have to send a dummy tenant name here since the client
        # needs some tenant name, but the tenant name will not be used
        # since the extensions URL does not require it
        LOG.debug("Initializing Cisco Quantum Port-aware Scheduler...")
        super(QuantumPortAwareScheduler, self).__init__()
        client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION,
                        uri_prefix="", tenant="dummy", logger=LOG)
        request_url = "/extensions"
        data = client.do_request('GET', request_url)
        LOG.debug("Obtained supported extensions from Quantum: %s" % data)
        # Fail fast at service start-up if the Quantum plugin does not
        # advertise the Cisco extension this scheduler relies on.
        for ext in data['extensions']:
            name = ext['name']
            if name == CSCO_EXT_NAME:
                # NOTE(review): adjacent string literals here produce
                # "...extensionfor the scheduler." (missing space) in the
                # log output -- confirm and fix the message upstream.
                LOG.debug("Quantum plugin supports required \"%s\" extension"
                          "for the scheduler." % name)
                return
        LOG.error("Quantum plugin does not support required \"%s\" extension"
                  " for the scheduler. Scheduler will quit." % CSCO_EXT_NAME)
        raise excp.ServiceUnavailable()
    def _schedule(self, context, topic, request_spec, **kwargs):
        """Gets the host name from the Quantum service"""
        LOG.debug("Cisco Quantum Port-aware Scheduler is scheduling...")
        # Identify the instance being scheduled; Quantum picks the host
        # based on the ports/networks associated with it.
        instance_id = request_spec['instance_properties']['uuid']
        user_id = request_spec['instance_properties']['user_id']
        project_id = request_spec['instance_properties']['project_id']
        instance_data_dict = {'novatenant':
                              {'instance_id': instance_id,
                               'instance_desc':
                               {'user_id': user_id,
                                'project_id': project_id}}}
        client = Client(HOST, PORT, USE_SSL, format='json', version=VERSION,
                        uri_prefix=URI_PREFIX_CSCO, tenant=TENANT_ID,
                        logger=LOG)
        request_url = "/novatenants/" + project_id + ACTION
        data = client.do_request('PUT', request_url, body=instance_data_dict)
        # The extension returns a host list; only the first entry is used.
        hostname = data["host_list"]["host_1"]
        if not hostname:
            raise excp.NoValidHost(_("Scheduler was unable to locate a host"
                                     " for this request. Is the appropriate"
                                     " service running?"))
        LOG.debug(_("Quantum service returned host: %s") % hostname)
        return hostname
| apache-2.0 |
piquadrat/django | tests/admin_inlines/tests.py | 17 | 44526 | from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
# Marker found in an inline's rendered "Change" link; used by several tests
# to assert the presence/absence of show_change_link output.
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin:
    """Mixin that creates a shared superuser for the admin inline tests."""
    @classmethod
    def setUpTestData(cls):
        # Created once per test class; tests authenticate as this superuser.
        cls.superuser = User.objects.create_superuser(username='super', email='super@example.com', password='secret')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    """Rendering and behavior of admin inline formsets (add/change views)."""
    def setUp(self):
        # One Holder with one Inner inline, plus a logged-in superuser and
        # a RequestFactory for the changeform_view tests below.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        self.client.force_login(self.superuser)
        self.factory = RequestFactory()
    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        holder = Holder.objects.get(dummy=13)
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')
    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')
    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
    def test_inline_primary(self):
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should have
        its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="required">Title1</th>', html=True)
    def test_custom_form_tabular_inline_overridden_label(self):
        """
        SomeChildModelForm.__init__() overrides the label of a form field.
        That label is displayed in the TabularInline.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        field = list(response.context['inline_admin_formset'].fields())[0]
        self.assertEqual(field['label'], 'new label')
        self.assertContains(response, '<th class="required">New label</th>', html=True)
    def test_tabular_non_field_errors(self):
        """
        non_field_errors are displayed correctly, including the correct value
        for colspan.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
        self.assertContains(
            response,
            '<tr><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )
    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        self.assertEqual(response.status_code, 200)
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )
    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        self.assertEqual(response.status_code, 200)
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')
    def test_help_text(self):
        """
        The inlines' model field help texts are displayed when using both the
        stacked and tabular layouts.
        """
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome." />',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline" />',
            1
        )
    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        self.assertNotContains(response, '<td class="field-position">')
        self.assertInHTML(
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1" />',
            response.rendered_content,
        )
    def test_non_related_name_inline(self):
        """
        Multiple inlines with related_name='+' have correct form prefixes.
        """
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100" />',
            html=True
        )
        self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100" />',
            html=True
        )
    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_localize_pk_shortcut(self):
        """
        The "View on Site" link is correct for locales that use thousand
        separators.
        """
        holder = Holder.objects.create(pk=123456789, dummy=42)
        inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
        inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
        self.assertContains(response, inner_shortcut)
    def test_custom_pk_shortcut(self):
        """
        The "View on Site" link is correct for models with a custom primary key
        field.
        """
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
        child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
        self.assertContains(response, child1_shortcut)
        self.assertContains(response, child2_shortcut)
    def test_create_inlines_on_inherited_model(self):
        """
        An object can be created with inlines when it inherits another class.
        """
        data = {
            'name': 'Martian',
            'sighting_set-TOTAL_FORMS': 1,
            'sighting_set-INITIAL_FORMS': 0,
            'sighting_set-MAX_NUM_FORMS': 0,
            'sighting_set-0-place': 'Zone 51',
            '_save': 'Save',
        }
        response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
    def test_custom_get_extra_form(self):
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        # The maximum number of forms should respect 'get_max_num' on the
        # ModelAdmin
        max_forms_input = (
            '<input id="id_binarytree_set-MAX_NUM_FORMS" '
            'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
        )
        # The total number of forms will remain the same in either case
        total_forms_hidden = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
        )
        response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
        self.assertInHTML(max_forms_input % 3, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
        response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        self.assertInHTML(max_forms_input % 2, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)
    def test_min_num(self):
        """
        min_num and extra determine number of forms.
        """
        class MinNumInline(TabularInline):
            model = BinaryTree
            min_num = 2
            extra = 3
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms, response.rendered_content)
        self.assertInHTML(total_forms, response.rendered_content)
    def test_custom_min_num(self):
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        class MinNumInline(TabularInline):
            model = BinaryTree
            extra = 3
            def get_min_num(self, request, obj=None, **kwargs):
                # min_num depends on whether an existing object is edited.
                if obj:
                    return 5
                return 2
        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]
        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms % 2, response.rendered_content)
        self.assertInHTML(total_forms % 5, response.rendered_content)
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
        self.assertInHTML(min_forms % 5, response.rendered_content)
        self.assertInHTML(total_forms % 8, response.rendered_content)
    def test_inline_nonauto_noneditable_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-0-rand_pk" '
            'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-2-0-rand_pk" '
            'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
            html=True
        )
    def test_inline_nonauto_noneditable_inherited_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden" />',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden" />',
            html=True
        )
    def test_inline_editable_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
            'name="editablepkbook_set-0-manual_pk" type="number" />',
            html=True, count=1
        )
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
            'name="editablepkbook_set-2-0-manual_pk" type="number" />',
            html=True, count=1
        )
    def test_stacked_inline_edit_form_contains_has_original_class(self):
        holder = Holder.objects.create(dummy=1)
        holder.inner_set.create(dummy=1)
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
        self.assertContains(
            response,
            '<div class="inline-related has_original" id="inner_set-0">',
            count=1
        )
        self.assertContains(
            response,
            '<div class="inline-related" id="inner_set-1">',
            count=1
        )
    def test_inlines_show_change_link_registered(self):
        "Inlines `show_change_link` for registered models when enabled."
        holder = Holder4.objects.create(dummy=1)
        item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
        item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
        items = (
            ('inner4stacked', item1.pk),
            ('inner4tabular', item2.pk),
        )
        response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        for model, pk in items:
            url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
            self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
    def test_inlines_show_change_link_unregistered(self):
        "Inlines `show_change_link` disabled for unregistered models."
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
    def test_tabular_inline_show_change_link_false_registered(self):
        "Inlines `show_change_link` disabled by default."
        poll = Poll.objects.create(name="New poll")
        Question.objects.create(poll=poll)
        response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):
    """Media (JS) declared on the parent ModelAdmin and/or its inlines is
    included in the change view."""
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_inline_media_only_base(self):
        # Only the parent ModelAdmin declares media.
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
    def test_inline_media_only_inline(self):
        # Only the inline declares media.
        holder = Holder3(dummy=13)
        holder.save()
        Inner3(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_inline_scripts.js')
    def test_all_inline_media(self):
        # Both parent and inline declare media; both must appear.
        holder = Holder2(dummy=13)
        holder.save()
        Inner2(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
        self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):
    """Unit-level checks on the InlineAdminForm helper itself."""
    def test_immutable_content_type(self):
        """Regression for #9362
        The problem depends only on InlineAdminForm and its "original"
        argument, so we can safely set the other arguments to None/{}. We just
        need to check that the content_type argument of Child isn't altered by
        the internals of the inline form."""
        sally = Teacher.objects.create(name='Sally')
        john = Parent.objects.create(name='John')
        joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
        iaf = InlineAdminForm(None, None, {}, {}, joe)
        parent_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
    """Deleting an inline whose children are PROTECTed must fail validation."""

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        novel = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=novel, name='Many Meetings')
        foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')

        change_url = reverse('admin:admin_inlines_novel_change', args=(novel.id,))
        self.client.get(change_url)
        # Post the management form back with the chapter marked for deletion.
        post_data = {
            'name': novel.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': novel.id,
            'chapter_set-0-DELETE': 'on'
        }
        response = self.client.post(change_url, post_data)
        self.assertContains(
            response,
            "Deleting chapter %s would require deleting "
            "the following protected related objects: foot note %s"
            % (chapter, foot_note)
        )
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """

    def setUp(self):
        # A staff (but not superuser) account, so per-model permissions apply.
        self.user = User(username='admin')
        self.user.is_staff = True
        self.user.is_active = True
        self.user.set_password('secret')
        self.user.save()

        self.author_ct = ContentType.objects.get_for_model(Author)
        self.holder_ct = ContentType.objects.get_for_model(Holder2)
        self.book_ct = ContentType.objects.get_for_model(Book)
        self.inner_ct = ContentType.objects.get_for_model(Inner2)

        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
        self.user.user_permissions.add(permission)

        # Fixture data: one Author with one Book (m2m inline), and one
        # Holder2 with one Inner2 (FK inline).
        author = Author.objects.create(pk=1, name='The Author')
        book = author.books.create(name='The inline Book')
        self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
        self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk

        holder = Holder2.objects.create(dummy=13)
        inner2 = Inner2.objects.create(dummy=42, holder=holder)
        self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        self.inner2_id = inner2.id

        self.client.force_login(self.user)

    def test_inline_add_m2m_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
                            'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)

    def test_inline_change_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_change_perm(self):
        permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-book relationship')
        self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
                            'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-0-id" value="%i" '
            'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
            html=True
        )
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
            'name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertNotContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )

    def test_inline_change_fk_change_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>')
        # Just the one form for existing instances
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        # max-num 0 means we can't add new ones
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS" />',
            html=True
        )

    def test_inline_change_fk_add_change_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(
            response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )

    def test_inline_change_fk_change_del_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, three for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS" />',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id" />' % self.inner2_id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):
    """Browser-level tests for dynamically added/removed inline formsets."""

    available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        User.objects.create_superuser(username='super', password='secret', email='super@example.com')

    def test_add_stackeds(self):
        """
        The "Add another XXX" link correctly adds items to the stacked formset.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            # Number of dynamic inline rows currently rendered in the group.
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))

        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()

        self.assertEqual(rows_length(), 4)

    def test_delete_stackeds(self):
        """Delete links remove dynamically added stacked inline rows."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))

        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text(
            'Add another Inner4 stacked')
        add_button.click()
        add_button.click()

        self.assertEqual(rows_length(), 5, msg="sanity check")
        # Delete links exist only on the dynamically added rows.
        for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id):
            delete_link.click()
        self.assertEqual(rows_length(), 3)

    def test_add_inlines(self):
        """
        The "Add another XXX" link correctly adds items to the inline form.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # There's only one inline to start with and it has the correct ID.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 1)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[0].get_attribute('id'),
            'profile_set-0')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)

        # Add an inline
        self.selenium.find_element_by_link_text('Add another Profile').click()

        # The inline has been added, it has the right id, and it contains the
        # correct fields.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)

        # Let's add another one to be sure
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)

        # Enter some data and click 'Save'
        self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
        self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
        self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
        self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
        self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
        self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')

        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.wait_page_loaded()

        # The objects have been created in the database
        self.assertEqual(ProfileCollection.objects.all().count(), 1)
        self.assertEqual(Profile.objects.all().count(), 3)

    def test_delete_inlines(self):
        """Deleting rows removes them and re-sequences the remaining IDs."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 5)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)

        # Click on a few delete buttons
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
        # The rows are gone and the IDs have been re-sequenced
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 3)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)

    def test_alternating_rows(self):
        """Added tabular rows get alternating row1/row2 CSS classes."""
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()

        row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")

    def test_collapsed_inlines(self):
        # Collapsed inlines have SHOW/HIDE links.
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
        # One field is in a stacked inline, other in a tabular one.
        test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 3)
        for show_index, field_name in enumerate(test_fields, 0):
            self.wait_until_invisible(field_name)
            show_links[show_index].click()
            self.wait_until_visible(field_name)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields, 0):
            self.wait_until_visible(field_name)
            hide_links[hide_index].click()
            self.wait_until_invisible(field_name)
| bsd-3-clause |
dmlux/UZLMathLib | gtest-1.7.0/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| bsd-3-clause |
ol-loginov/intellij-community | python/lib/Lib/site-packages/django/conf/locale/ka/formats.py | 329 | 1888 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Output formats (Django date-format syntax, see link in the header above).
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i:s a'
DATETIME_FORMAT = 'j F, Y h:i:s a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # (Monday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # '%d %b %Y', '%d %b, %Y', '%d %b. %Y',  # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
    # '%d %B %Y', '%d %B, %Y',  # '25 October 2006', '25 October, 2006'
    # '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',  # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
    '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M',  # '10/25/06 14:30'
    '%m/%d/%y',  # '10/25/06'
)
# Number formatting.
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
| apache-2.0 |
zsdonghao/tensorlayer | tensorlayer/models/mobilenetv1.py | 1 | 7772 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""MobileNet for ImageNet."""
import os
import tensorflow as tf
from tensorlayer import logging
from tensorlayer.layers import Layer
from tensorlayer.layers import BatchNormLayer
from tensorlayer.layers import Conv2d
from tensorlayer.layers import DepthwiseConv2d
from tensorlayer.layers import FlattenLayer
from tensorlayer.layers import GlobalMeanPool2d
from tensorlayer.layers import InputLayer
from tensorlayer.layers import ReshapeLayer
from tensorlayer.files import maybe_download_and_extract, assign_params, load_npz
__all__ = [
'MobileNetV1',
]
class MobileNetV1(Layer):
"""Pre-trained MobileNetV1 model.
Parameters
------------
x : placeholder
shape [None, 224, 224, 3], value range [0, 1].
end_with : str
The end point of the model [conv, depth1, depth2 ... depth13, globalmeanpool, out]. Default ``out`` i.e. the whole model.
is_train : boolean
Whether the model is used for training i.e. enable dropout.
reuse : boolean
Whether to reuse the model.
Examples
---------
Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>`__
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get the whole model
>>> net = tl.models.MobileNetV1(x)
>>> # restore pre-trained parameters
>>> sess = tf.InteractiveSession()
>>> net.restore_params(sess)
>>> # use for inferencing
>>> probs = tf.nn.softmax(net.outputs)
Extract features and Train a classifier with 100 classes
>>> x = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get model without the last layer
>>> cnn = tl.models.MobileNetV1(x, end_with='reshape')
>>> # add one more layer
>>> net = Conv2d(cnn, 100, (1, 1), (1, 1), name='out')
>>> net = FlattenLayer(net, name='flatten')
>>> # initialize all parameters
>>> sess = tf.InteractiveSession()
>>> tl.layers.initialize_global_variables(sess)
>>> # restore pre-trained parameters
>>> cnn.restore_params(sess)
>>> # train your own classifier (only update the last layer)
>>> train_params = tl.layers.get_variables_with_name('out')
Reuse model
>>> x1 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> x2 = tf.placeholder(tf.float32, [None, 224, 224, 3])
>>> # get model without the last layer
>>> net1 = tl.models.MobileNetV1(x1, end_with='reshape')
>>> # reuse the parameters with different input
>>> net2 = tl.models.MobileNetV1(x2, end_with='reshape', reuse=True)
>>> # restore pre-trained parameters (as they share parameters, we don’t need to restore net2)
>>> sess = tf.InteractiveSession()
>>> net1.restore_params(sess)
"""
def __init__(self, x, end_with='out', is_train=False, reuse=None):
self.net = self.mobilenetv1(x, end_with, is_train, reuse)
self.outputs = self.net.outputs
self.all_params = list(self.net.all_params)
self.all_layers = list(self.net.all_layers)
self.all_drop = dict(self.net.all_drop)
self.print_layers = self.net.print_layers
self.print_params = self.net.print_params
# @classmethod
def mobilenetv1(self, x, end_with='out', is_train=False, reuse=None):
with tf.variable_scope("mobilenetv1", reuse=reuse):
n = InputLayer(x)
n = self.conv_block(n, 32, strides=(2, 2), is_train=is_train, name="conv")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 64, is_train=is_train, name="depth1")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 128, strides=(2, 2), is_train=is_train, name="depth2")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 128, is_train=is_train, name="depth3")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 256, strides=(2, 2), is_train=is_train, name="depth4")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 256, is_train=is_train, name="depth5")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, strides=(2, 2), is_train=is_train, name="depth6")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth7")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth8")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth9")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth10")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth11")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 1024, strides=(2, 2), is_train=is_train, name="depth12")
if end_with in n.outputs.name:
return n
n = self.depthwise_conv_block(n, 1024, is_train=is_train, name="depth13")
if end_with in n.outputs.name:
return n
n = GlobalMeanPool2d(n, name='globalmeanpool')
if end_with in n.outputs.name:
return n
# n = DropoutLayer(n, 1-1e-3, True, is_train, name='drop')
# n = DenseLayer(n, 1000, name='output') # equal
n = ReshapeLayer(n, [-1, 1, 1, 1024], name='reshape')
if end_with in n.outputs.name:
return n
n = Conv2d(n, 1000, (1, 1), (1, 1), name='out')
n = FlattenLayer(n, name='flatten')
if end_with == 'out':
return n
raise Exception("end_with : conv, depth1, depth2 ... depth13, globalmeanpool, out")
@classmethod
def conv_block(cls, n, n_filter, filter_size=(3, 3), strides=(1, 1), is_train=False, name='conv_block'):
# ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
with tf.variable_scope(name):
n = Conv2d(n, n_filter, filter_size, strides, b_init=None, name='conv')
n = BatchNormLayer(n, decay=0.99, act=tf.nn.relu6, is_train=is_train, name='batchnorm')
return n
@classmethod
def depthwise_conv_block(cls, n, n_filter, strides=(1, 1), is_train=False, name="depth_block"):
with tf.variable_scope(name):
n = DepthwiseConv2d(n, (3, 3), strides, b_init=None, name='depthwise')
n = BatchNormLayer(n, decay=0.99, act=tf.nn.relu6, is_train=is_train, name='batchnorm1')
n = Conv2d(n, n_filter, (1, 1), (1, 1), b_init=None, name='conv')
n = BatchNormLayer(n, decay=0.99, act=tf.nn.relu6, is_train=is_train, name='batchnorm2')
return n
    def restore_params(self, sess, path='models'):
        """Load pretrained MobileNet weights into ``self.net``.

        Downloads ``mobilenet.npz`` into *path* if it is not already present,
        then assigns the stored arrays to the network's parameters in
        session *sess*.
        """
        logging.info("Restore pre-trained parameters")
        maybe_download_and_extract(
            'mobilenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/',
            expected_bytes=25600116
        )  # expected_bytes guards against a truncated download
        params = load_npz(name=os.path.join(path, 'mobilenet.npz'))
        # Assign only as many arrays as the network has parameter tensors.
        assign_params(sess, params[:len(self.net.all_params)], self.net)
        del params  # free the loaded arrays promptly
| apache-2.0 |
alikins/func-alikins-devel | test/unittest/test_groups_api.py | 4 | 16987 | from func.overlord.groups import Groups,get_hosts_spec
from certmaster.config import read_config, CONFIG_FILE
from certmaster.commonconfig import CMConfig
import os
import fnmatch
from func.overlord.group.conf_backend import ConfBackend
from func.overlord.group.sqlite_backend import SqliteBackend
TEST_DB_FILE = "/tmp/test_sqlite.db"
TEST_CONF_FILE = "/tmp/test_conf.conf"
class BaseMinions(object):
    """Mixin that fakes minions by touching empty certificate files.

    Minions are named "0".."N-1"; certmaster's certroot and cert extension
    come from the standard config file, so group/glob lookups see them as
    real hosts.
    """
    def create_dummy_minions(self,howmany=None):
        """Create *howmany* (default 100) empty cert files; return the names."""
        cm_config = read_config(CONFIG_FILE, CMConfig)
        howmany = howmany or 100 #it is a good default number
        final_list = []
        for m in xrange(howmany):
            # touching the file is enough: only its name/extension matter
            tmp_f = open("%s/%s.%s" % (cm_config.certroot,str(m),cm_config.cert_extension),"w")
            tmp_f.close()
            final_list.append(str(m))
        print "%d dummy minions created "%howmany
        return final_list
    def clean_dummy_minions(self,howmany=None):
        """Remove the cert files created by create_dummy_minions."""
        cm_config = read_config(CONFIG_FILE, CMConfig)
        howmany = howmany or 100 #it is a good default number
        for m in xrange(howmany):
            tmp_f = "%s/%s.%s" % (cm_config.certroot,str(m), cm_config.cert_extension)
            if os.path.exists(tmp_f):
                os.remove(tmp_f)
        print "%d dummy minions cleaned "%howmany
class BaseGroupT(object):
    """Shared helpers for running every test once per group backend."""
    # One kwargs dict per backend; each is splatted into Groups(**kwargs).
    backends = [
        {'backend':'sqlite','db_file':TEST_DB_FILE},
        {'backend':'conf','conf_file':TEST_CONF_FILE}
    ]
    def refresh_backend(self,g_object):
        """Return a fresh Groups object using the same backend as *g_object*.

        Forces a re-read from persistent storage so tests verify that
        changes were saved, not just cached.  Add an elif branch here when
        introducing a new backend.
        """
        from func.overlord.group.conf_backend import ConfBackend
        from func.overlord.group.sqlite_backend import SqliteBackend
        if isinstance(g_object.backend,ConfBackend):
            return Groups(**self.backends[1])
        elif isinstance(g_object.backend,SqliteBackend):
            return Groups(**self.backends[0])
        else:
            return None
    def get_group_objects(self):
        """Instantiate one Groups object per configured backend."""
        gr_list = []
        for b in self.backends:
            gr_list.append(Groups(**b))
        return gr_list
    def clean_t_files(self,path):
        """Delete *path* if it exists (per-test backend storage cleanup)."""
        if os.path.exists(path):
            os.remove(path)
class TestGroupApi(BaseGroupT,BaseMinions):
    """End-to-end tests of the Groups API, run against every backend.

    Each mutation is typically followed by refresh_backend() to prove the
    change was persisted to storage and not just cached in memory.
    """
    def setUp(self):
        """Remove stale backend files and (re)create the dummy minions."""
        #clean current files
        self.clean_t_files(TEST_DB_FILE)
        self.clean_t_files(TEST_CONF_FILE)
        #destroy and create minions
        self.clean_dummy_minions()
        self.current_minions = self.create_dummy_minions()
        #get groups
        self.groups = self.get_group_objects()
    def teardown(self):
        """Remove dummy minions and backend files after each test."""
        self.clean_dummy_minions()
        self.clean_t_files(TEST_DB_FILE)
        self.clean_t_files(TEST_CONF_FILE)
    def test_add_group(self):
        """Adding a group succeeds once; re-adding it is rejected."""
        for g in self.groups:
            assert g.add_group("group1",save=True)[0]== True
            g = self.refresh_backend(g)
            assert g.add_group("group1")[0] == False
    def test_add_host_to_group(self):
        """Adding a host succeeds once; re-adding it is rejected."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            assert g.add_host_to_group(g_name,"host1")[0] == True
            g = self.refresh_backend(g)
            assert g.add_host_to_group(g_name,"host1")[0] == False
    def test_add_hostst_to_group(self):
        """Hosts can be added from comma- or semicolon-separated strings."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_hosts_to_group(g_name,"host1,host2,host3")
            g = self.refresh_backend(g)
            g.add_hosts_to_group(g_name,"host5;host7;host8")
    def test_add_host_list(self):
        """Hosts can be added from a list; duplicate adds are harmless."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host1","host2","host3"])
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host1","host2","host3"])
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host4","host5","host6"])
    def test_add_hosts_to_group_glob(self):
        """Glob addition covers all minions; exclude_string filters them."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_hosts_to_group_glob(g_name,"*") #add all of them
            g = self.refresh_backend(g)
        self.groups = self.get_group_objects()
        for g in self.groups:
            for h in self.current_minions:
                if self.current_minions.index(h) %10 == 0:
                    print "Tests completed : ",self.current_minions.index(h)
                # every minion is already in the group, so re-adding fails
                assert g.add_host_to_group(g_name,h)[0] == False
                #print "Let see IT ",g.add_host_to_group(g_name,h)[0]
                g = self.refresh_backend(g)
        #clear again so we can test exclude thing
        self.teardown()
        self.setUp()
        #print "Testing exclude string ...."
        self.groups = self.get_group_objects()
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            # odd-numbered minions are excluded by the trailing-digit glob
            g.add_hosts_to_group_glob(g_name,"*",exclude_string="*[1,3,5,7,9]")
            g = self.refresh_backend(g)
            #add all of them
            for h in self.current_minions:
                #print "Checking for : ",h
                if int(h)%2==0:
                    # even minions were added above, so re-adding fails
                    assert g.add_host_to_group(g_name,h)[0] == False
                    g = self.refresh_backend(g)
                else:
                    assert g.add_host_to_group(g_name,h)[0] == True
                    g = self.refresh_backend(g)
    def test_get_groups(self):
        """get_groups supports exact, prefix, and exclude-list lookups."""
        for g in self.groups:
            g.add_group("group1")
            g = self.refresh_backend(g)
            g.add_group("group2")
            g = self.refresh_backend(g)
            #get all groups
            grs = g.get_groups()
            assert self._t_compare_arrays(grs,["group1","group2"]) == True
            #get one
            tmg = g.get_groups(pattern="group1")
            assert tmg==["group1"]
            tmg = g.get_groups(pattern="gr",exact=False)
            assert self._t_compare_arrays(tmg,["group1","group2"])==True
            tmg = g.get_groups(pattern="gr",exact=False,exclude=["group2"])
            assert tmg == ["group1"]
            #test also an empty one
            tmg = g.get_groups(pattern="group3")
            assert tmg == []
    def test_get_groups_glob(self):
        """Group lookups support shell-style globs plus an exclude glob."""
        for g in self.groups:
            g.add_group("group1")
            g = self.refresh_backend(g)
            g.add_group("group2")
            g = self.refresh_backend(g)
            #get all groups
            grs = g.get_groups_glob("*")
            assert self._t_compare_arrays(grs,["group1","group2"]) == True
            #get one
            tmg = g.get_groups_glob("*[1]")
            assert tmg == ["group1"]
            tmg = g.get_groups_glob("*",exclude_string="*[2]")
            assert tmg == ["group1"]
            #test also an empty one
            tmg = g.get_groups_glob("*[3]")
            assert tmg == []
    def test_get_hosts(self):
        """get_hosts supports exact, prefix, and exclude-list lookups."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host1","host2","host3"])
            g = self.refresh_backend(g)
            hosts = g.get_hosts(group=g_name)
            assert self._t_compare_arrays(hosts,["host1","host2","host3"]) == True
            #get only one
            host = g.get_hosts(pattern="host1",group=g_name)
            assert host == ["host1"]
            #get pattern
            host = g.get_hosts(pattern="ho",group=g_name,exact=False)
            assert self._t_compare_arrays(host,["host1","host2","host3"]) == True
            host = g.get_hosts(pattern="ho",group=g_name,exact=False,exclude=["host1","host2"])
            assert host==["host3"]
            #an empty test also
            host = g.get_hosts(pattern="host4")
            assert host==[]
    def test_get_hosts_glob(self):
        """Host lookups support @group, @group:glob, and exclude strings."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_hosts_to_group_glob(g_name,"*") #add all of them
            g = self.refresh_backend(g)
            hosts = g.get_hosts_glob("@group1")
            assert self._t_compare_arrays(hosts,self.current_minions) == True
            #try subgroupping thing on the fly
            hosts = g.get_hosts_glob("@group1:[0-9]")
            assert self._t_compare_arrays(hosts,list(range(10))) == True
            #try the exclude string
            hosts = g.get_hosts_glob("@group1",exclude_string="@group1:[0-9][0-9]")
            assert self._t_compare_arrays(hosts,list(range(10))) == True
            hosts = g.get_hosts_glob("@group1:[1-5][0-9];@group1:[6-9][0-9]",exclude_string="@group1:[1-8][0-9];@group1:[9][0-9]")
            assert self._t_compare_arrays(hosts,[]) == True
    def test_remove_group(self):
        """Removing a group succeeds once; a second removal is rejected."""
        for g in self.groups:
            g.add_group("group1")
            g = self.refresh_backend(g)
            #removing the group
            assert g.remove_group("group1")[0] == True
            g = self.refresh_backend(g)
            assert g.remove_group("group1")[0] == False
            g = self.refresh_backend(g)
            grs = g.get_groups_glob("*")
            assert grs == []
    def test_remove_group_list(self):
        """A list of groups can be removed in one call."""
        for g in self.groups:
            g.add_group("group1")
            g = self.refresh_backend(g)
            g.add_group("group2")
            g = self.refresh_backend(g)
            #removing the group
            g.remove_group_list(["group1","group2"])
            g = self.refresh_backend(g)
            grs = g.get_groups_glob("*")
            assert grs == []
    def test_remove_group_glob(self):
        """Groups matching a glob can be removed in one call."""
        for g in self.groups:
            g.add_group("group1")
            g = self.refresh_backend(g)
            g.add_group("group2")
            g = self.refresh_backend(g)
            #removing the group
            g.remove_group_glob("gr*")
            g = self.refresh_backend(g)
            grs = g.get_groups_glob("*")
            assert grs == []
    def test_remove_host(self):
        """Removing a host succeeds once; a second removal is rejected."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host1","host2","host3"])
            g = self.refresh_backend(g)
            assert g.remove_host(g_name,"host1")[0] == True
            g = self.refresh_backend(g)
            assert g.remove_host(g_name,"host1")[0] == False
            g = self.refresh_backend(g)
            hosts = g.get_hosts(group=g_name)
            assert self._t_compare_arrays(hosts,["host2","host3"])
            assert g.remove_host(g_name,"host2")[0] ==True
            g = self.refresh_backend(g)
            hosts = g.get_hosts(group=g_name)
            assert self._t_compare_arrays(hosts,["host3"])
    def test_remove_host_list(self):
        """A list of hosts can be removed in one call."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_host_list(g_name,["host1","host2","host3"])
            g = self.refresh_backend(g)
            g.remove_host_list(g_name,["host1","host2"])
            g = self.refresh_backend(g)
            hosts = g.get_hosts(group=g_name)
            assert hosts == ["host3"]
    def test_remove_host_glob(self):
        """Hosts matching a glob can be removed, with optional excludes."""
        g_name = "group1"
        for g in self.groups:
            g.add_group(g_name)
            g = self.refresh_backend(g)
            g.add_hosts_to_group_glob(g_name,"*") #add all of them
            g = self.refresh_backend(g)
            g.remove_host_glob("group1","*")
            g = self.refresh_backend(g)
            hosts = g.get_hosts_glob("@group1")
            assert hosts==[]
            g.add_hosts_to_group_glob(g_name,"*") #add all of them
            g = self.refresh_backend(g)
            #try subgroupping thing on the fly
            g.remove_host_glob("group1","[0-9][0-9]")
            g = self.refresh_backend(g)
            hosts = g.get_hosts_glob("@group1:*")
            assert self._t_compare_arrays(hosts,list(range(10))) == True
            #try the exclude string
            g.remove_host_glob("group1","*",exclude_string="[0-9][0-9]")
            g = self.refresh_backend(g)
            hosts = g.get_hosts_glob("@group1:*")
            assert self._t_compare_arrays(hosts,[]) == True
    def _t_compare_arrays(self,one,two):
        # thin wrapper so tests can call the module-level helper as a method
        return compare_arrays(one,two)
def compare_arrays(one, two):
    """Loosely compare two sequences.

    Exactly equal sequences match immediately.  Otherwise both must be
    non-empty and every element of *one* must appear in *two* after the
    elements of *two* are stringified.  Order and duplicates are ignored
    and the check is deliberately asymmetric (extra elements in *two* are
    allowed), which lets tests compare host-name strings against ranges
    of ints.
    """
    if one == two:
        return True
    if not one or not two:
        return False
    stringified = [str(item) for item in two]
    return all(element in stringified for element in one)
from func.overlord.client import Minions
class TestMinionGroups(BaseMinions):
    """Test the Minions helpers that wrap the group classes."""
    # kwargs splatted into Minions(**kwargs); one dict per group backend
    backends = [
        {'groups_backend':'sqlite','db_file':TEST_DB_FILE},
        {'groups_backend':'conf','conf_file':TEST_CONF_FILE}
    ]
    def teardown(self):
        # remove backend storage files and the fake minion certs
        for path in [TEST_DB_FILE,TEST_CONF_FILE]:
            if os.path.exists(path):
                os.remove(path)
        self.clean_dummy_minions()
    def setUp(self):
        #destroy and create minions
        self.clean_dummy_minions()
        self.current_minions = self.create_dummy_minions()
        #get groups
    def test_get_urls(self):
        """get_urls() should resolve a spec for every backend without error."""
        for backend_dict in self.backends:
            #create a minion with relevant backens
            m = Minions("[0-9]",**backend_dict)
            hosts = m.get_urls()
            print hosts
    def test_get_hosts_for_spec(self):
        """A bare "*" spec should match every dummy minion."""
        spec = "*"
        m = Minions(spec)
        minions = m.get_hosts_for_spec(spec)
        assert compare_arrays(minions,self.current_minions) == True
    def test_get_all_hosts(self):
        """Specs may mix globs and @group references, plus exclude specs."""
        for backend_dict in self.backends:
            #create a minion with relevant backens
            m = Minions("*",**backend_dict)
            #create some groups and hosts into that Minion
            m.group_class.add_group("group1")
            m.group_class.add_hosts_to_group_glob("group1","[0-9]")
            hosts = m.get_all_hosts()
            assert compare_arrays(hosts,self.current_minions) == True
            #now test with grouping
            m = Minions("[1][0-9];@group1:*",**backend_dict)
            hosts = m.get_all_hosts()
            assert compare_arrays(hosts,range(20)) == True
            m = Minions("[1][0-5];@group1:[5-9]",**backend_dict)
            hosts = m.get_all_hosts()
            assert compare_arrays(hosts,range(5,16)) == True
            #do some testing about exclude string
            m = Minions("*",exclude_spec="[1-9][0-9]",**backend_dict)
            hosts = m.get_all_hosts()
            assert compare_arrays(hosts,range(10)) == True
            m = Minions("[1][0-5];@group1:[5-9]",exclude_spec="[1][3-5];@group1:[5-7]",**backend_dict)
            hosts = m.get_all_hosts()
            assert compare_arrays(hosts,range(8,13)) == True
if __name__ == "__main__":
    # Ad-hoc helper: populate dummy minion certs for manual experimentation.
    b = BaseMinions()
    b.create_dummy_minions()
    #b.clean_dummy_minions()
| gpl-2.0 |
babble/babble | include/jython/Lib/test/test_platform.py | 19 | 1883 | import unittest
from test import test_support
import platform
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
def test_machine(self):
res = platform.machine()
def test_node(self):
res = platform.node()
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_processor(self):
res = platform.processor()
def test_python_build(self):
res = platform.python_build()
def test_python_compiler(self):
res = platform.python_compiler()
def test_version(self):
res1 = platform.version()
res2 = platform.version_tuple()
self.assertEqual(res1, ".".join(res2))
def test_release(self):
res = platform.release()
def test_system(self):
res = platform.system()
def test_version(self):
res = platform.version()
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
def test_java_ver(self):
res = platform.java_ver()
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
def test_dist(self):
res = platform.dist()
def test_libc_ver(self):
from sys import executable
import os
if os.path.isdir(executable) and os.path.exists(executable+'.exe'):
# Cygwin horror
executable = executable + '.exe'
res = platform.libc_ver(executable)
def test_main():
    # Run the whole PlatformTest suite via the regression-test driver.
    test_support.run_unittest(
        PlatformTest
    )
if __name__ == '__main__':
    test_main()
| apache-2.0 |
CPFDSoftware-Tony/gmv | utils/Mesa/Mesa-7.8.2/src/gallium/drivers/svga/svgadump/svga_dump.py | 50 | 11879 | #!/usr/bin/env python
'''
Generates dumper for the SVGA 3D command stream using pygccxml.
Jose Fonseca <jfonseca@vmware.com>
'''
copyright = '''
/**********************************************************
* Copyright 2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
'''
import os
import sys
from pygccxml import parser
from pygccxml import declarations
from pygccxml.declarations import algorithm
from pygccxml.declarations import decl_visitor
from pygccxml.declarations import type_traits
from pygccxml.declarations import type_visitor
enums = True  # emit a symbolic switch/case for enum members instead of raw %i
class decl_dumper_t(decl_visitor.decl_visitor_t):
    """pygccxml declaration visitor that prints C code dumping one declaration.

    ``_instance`` is the accumulated member-access path (e.g. ".foo.bar")
    relative to the command struct pointer ``cmd`` in the generated C code.
    """
    def __init__(self, instance = '', decl = None):
        decl_visitor.decl_visitor_t.__init__(self)
        self._instance = instance
        self.decl = decl
    def clone(self):
        return decl_dumper_t(self._instance, self.decl)
    def visit_class(self):
        # Recurse into every named field of a struct/union.
        class_ = self.decl
        assert self.decl.class_type in ('struct', 'union')
        for variable in class_.variables():
            if variable.name != '':
                #print 'variable = %r' % variable.name
                dump_type(self._instance + '.' + variable.name, variable.type)
    def visit_enumeration(self):
        # Emit a switch that prints the symbolic enumerator name when
        # possible, falling back to the raw integer value.
        if enums:
            print '   switch(%s) {' % ("(*cmd)" + self._instance,)
            for name, value in self.decl.values:
                print '   case %s:' % (name,)
                print '      _debug_printf("\\t\\t%s = %s\\n");' % (self._instance, name)
                print '      break;'
            print '   default:'
            print '      _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
            print '      break;'
            print '   }'
        else:
            print '   _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
def dump_decl(instance, decl):
    """Emit dump statements for *decl* by applying the declaration visitor."""
    algorithm.apply_visitor(decl_dumper_t(instance, decl), decl)
class type_dumper_t(type_visitor.type_visitor_t):
    """pygccxml type visitor that prints a _debug_printf for one member.

    Scalar types map to a printf conversion specifier, arrays recurse
    element by element, and named (declarated) types defer back to the
    declaration dumper.
    """
    def __init__(self, instance, type_):
        type_visitor.type_visitor_t.__init__(self)
        self.instance = instance
        self.type = type_
    def clone(self):
        return type_dumper_t(self.instance, self.type)
    def visit_char(self):
        self.print_instance('%i')
    def visit_unsigned_char(self):
        self.print_instance('%u')
    def visit_signed_char(self):
        self.print_instance('%i')
    def visit_wchar(self):
        self.print_instance('%i')
    def visit_short_int(self):
        self.print_instance('%i')
    def visit_short_unsigned_int(self):
        self.print_instance('%u')
    def visit_bool(self):
        self.print_instance('%i')
    def visit_int(self):
        self.print_instance('%i')
    def visit_unsigned_int(self):
        self.print_instance('%u')
    def visit_long_int(self):
        self.print_instance('%li')
    def visit_long_unsigned_int(self):
        self.print_instance('%lu')
    def visit_long_long_int(self):
        self.print_instance('%lli')
    def visit_long_long_unsigned_int(self):
        self.print_instance('%llu')
    def visit_float(self):
        self.print_instance('%f')
    def visit_double(self):
        self.print_instance('%f')
    def visit_array(self):
        # Dump each element with an indexed access path.
        for i in range(type_traits.array_size(self.type)):
            dump_type(self.instance + '[%i]' % i, type_traits.base_type(self.type))
    def visit_pointer(self):
        self.print_instance('%p')
    def visit_declarated(self):
        # Named struct/union/enum: let the declaration visitor handle it.
        #print 'decl = %r' % self.type.decl_string
        decl = type_traits.remove_declarated(self.type)
        dump_decl(self.instance, decl)
    def print_instance(self, format):
        # Emit one printf line for this member using the given conversion.
        print '   _debug_printf("\\t\\t%s = %s\\n", %s);' % (self.instance, format, "(*cmd)" + self.instance)
def dump_type(instance, type_):
    """Strip typedef aliases from *type_* and emit dump statements for it."""
    resolved = type_traits.remove_alias(type_)
    algorithm.apply_visitor(type_dumper_t(instance, resolved), resolved)
def dump_struct(decls, class_):
    # Emit "static void dump_<Name>(const <Name> *cmd) { ... }" for one
    # SVGA3D struct.  NOTE(review): the 'decls' parameter appears unused
    # here -- confirm before removing it from the signature.
    print 'static void'
    print 'dump_%s(const %s *cmd)' % (class_.name, class_.name)
    print '{'
    dump_decl('', class_)
    print '}'
    print ''
# Command table: one entry per SVGA3D command as
#   (cmd id, header struct,
#    ((variable-length struct, header field holding its count), ...),
#    optional trailing struct repeated until the end of the command).
cmds = [
    ('SVGA_3D_CMD_SURFACE_DEFINE', 'SVGA3dCmdDefineSurface', (), 'SVGA3dSize'),
    ('SVGA_3D_CMD_SURFACE_DESTROY', 'SVGA3dCmdDestroySurface', (), None),
    ('SVGA_3D_CMD_SURFACE_COPY', 'SVGA3dCmdSurfaceCopy', (), 'SVGA3dCopyBox'),
    ('SVGA_3D_CMD_SURFACE_STRETCHBLT', 'SVGA3dCmdSurfaceStretchBlt', (), None),
    ('SVGA_3D_CMD_SURFACE_DMA', 'SVGA3dCmdSurfaceDMA', (), 'SVGA3dCopyBox'),
    ('SVGA_3D_CMD_CONTEXT_DEFINE', 'SVGA3dCmdDefineContext', (), None),
    ('SVGA_3D_CMD_CONTEXT_DESTROY', 'SVGA3dCmdDestroyContext', (), None),
    ('SVGA_3D_CMD_SETTRANSFORM', 'SVGA3dCmdSetTransform', (), None),
    ('SVGA_3D_CMD_SETZRANGE', 'SVGA3dCmdSetZRange', (), None),
    ('SVGA_3D_CMD_SETRENDERSTATE', 'SVGA3dCmdSetRenderState', (), 'SVGA3dRenderState'),
    ('SVGA_3D_CMD_SETRENDERTARGET', 'SVGA3dCmdSetRenderTarget', (), None),
    ('SVGA_3D_CMD_SETTEXTURESTATE', 'SVGA3dCmdSetTextureState', (), 'SVGA3dTextureState'),
    ('SVGA_3D_CMD_SETMATERIAL', 'SVGA3dCmdSetMaterial', (), None),
    ('SVGA_3D_CMD_SETLIGHTDATA', 'SVGA3dCmdSetLightData', (), None),
    ('SVGA_3D_CMD_SETLIGHTENABLED', 'SVGA3dCmdSetLightEnabled', (), None),
    ('SVGA_3D_CMD_SETVIEWPORT', 'SVGA3dCmdSetViewport', (), None),
    ('SVGA_3D_CMD_SETCLIPPLANE', 'SVGA3dCmdSetClipPlane', (), None),
    ('SVGA_3D_CMD_CLEAR', 'SVGA3dCmdClear', (), 'SVGA3dRect'),
    ('SVGA_3D_CMD_PRESENT', 'SVGA3dCmdPresent', (), 'SVGA3dCopyRect'),
    ('SVGA_3D_CMD_SHADER_DEFINE', 'SVGA3dCmdDefineShader', (), None),
    ('SVGA_3D_CMD_SHADER_DESTROY', 'SVGA3dCmdDestroyShader', (), None),
    ('SVGA_3D_CMD_SET_SHADER', 'SVGA3dCmdSetShader', (), None),
    ('SVGA_3D_CMD_SET_SHADER_CONST', 'SVGA3dCmdSetShaderConst', (), None),
    ('SVGA_3D_CMD_DRAW_PRIMITIVES', 'SVGA3dCmdDrawPrimitives', (('SVGA3dVertexDecl', 'numVertexDecls'), ('SVGA3dPrimitiveRange', 'numRanges')), 'SVGA3dVertexDivisor'),
    ('SVGA_3D_CMD_SETSCISSORRECT', 'SVGA3dCmdSetScissorRect', (), None),
    ('SVGA_3D_CMD_BEGIN_QUERY', 'SVGA3dCmdBeginQuery', (), None),
    ('SVGA_3D_CMD_END_QUERY', 'SVGA3dCmdEndQuery', (), None),
    ('SVGA_3D_CMD_WAIT_FOR_QUERY', 'SVGA3dCmdWaitForQuery', (), None),
    #('SVGA_3D_CMD_PRESENT_READBACK', None, (), None),
    ('SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN', 'SVGA3dCmdBlitSurfaceToScreen', (), 'SVGASignedRect'),
]
def dump_cmds():
    # Emit svga_dump_command(): a switch over all known SVGA3D command ids
    # that dumps the header struct, any counted variable-length arrays, and
    # an optional repeated trailing struct for each command.
    print r'''
void
svga_dump_command(uint32_t cmd_id, const void *data, uint32_t size)
{
   const uint8_t *body = (const uint8_t *)data;
   const uint8_t *next = body + size;
'''
    print '   switch(cmd_id) {'
    indexes = 'ijklmn'
    for id, header, body, footer in cmds:
        print '   case %s:' % id
        print '      _debug_printf("\\t%s\\n");' % id
        print '      {'
        print '         const %s *cmd = (const %s *)body;' % (header, header)
        if len(body):
            # one loop index per counted sub-array
            print '         unsigned ' + ', '.join(indexes[:len(body)]) + ';'
        print '         dump_%s(cmd);' % header
        print '         body = (const uint8_t *)&cmd[1];'
        for i in range(len(body)):
            struct, count = body[i]
            idx = indexes[i]
            print '         for(%s = 0; %s < cmd->%s; ++%s) {' % (idx, idx, count, idx)
            print '            dump_%s((const %s *)body);' % (struct, struct)
            print '            body += sizeof(%s);' % struct
            print '         }'
        if footer is not None:
            print '         while(body + sizeof(%s) <= next) {' % footer
            print '            dump_%s((const %s *)body);' % (footer, footer)
            print '            body += sizeof(%s);' % footer
            print '         }'
        if id == 'SVGA_3D_CMD_SHADER_DEFINE':
            # shader bytecode follows the header; disassemble it in place
            print '         svga_shader_dump((const uint32_t *)body,'
            print '                          (unsigned)(next - body)/sizeof(uint32_t),'
            print '                          FALSE);'
            print '         body = next;'
        print '      }'
        print '      break;'
    print '   default:'
    print '      _debug_printf("\\t0x%08x\\n", cmd_id);'
    print '      break;'
    print '   }'
    print r'''
   while(body + sizeof(uint32_t) <= next) {
      _debug_printf("\t\t0x%08x\n", *(const uint32_t *)body);
      body += sizeof(uint32_t);
   }
   while(body + sizeof(uint32_t) <= next)
      _debug_printf("\t\t0x%02x\n", *body++);
}
'''
    # Emit svga_dump_commands(): walks a raw command buffer and dispatches
    # each 3D command to svga_dump_command(), handling fences inline.
    print r'''
void
svga_dump_commands(const void *commands, uint32_t size)
{
   const uint8_t *next = commands;
   const uint8_t *last = next + size;
   assert(size % sizeof(uint32_t) == 0);
   while(next < last) {
      const uint32_t cmd_id = *(const uint32_t *)next;
      if(SVGA_3D_CMD_BASE <= cmd_id && cmd_id < SVGA_3D_CMD_MAX) {
         const SVGA3dCmdHeader *header = (const SVGA3dCmdHeader *)next;
         const uint8_t *body = (const uint8_t *)&header[1];
         next = body + header->size;
         if(next > last)
            break;
         svga_dump_command(cmd_id, body, header->size);
      }
      else if(cmd_id == SVGA_CMD_FENCE) {
         _debug_printf("\tSVGA_CMD_FENCE\n");
         _debug_printf("\t\t0x%08x\n", ((const uint32_t *)next)[1]);
         next += 2*sizeof(uint32_t);
      }
      else {
         _debug_printf("\t0x%08x\n", cmd_id);
         next += sizeof(uint32_t);
      }
   }
}
'''
def main():
    # Emit the license header and generated-file prologue.
    print copyright.strip()
    print
    print '/**'
    print ' * @file'
    print ' * Dump SVGA commands.'
    print ' *'
    print ' * Generated automatically from svga3d_reg.h by svga_dump.py.'
    print ' */'
    print
    print '#include "svga_types.h"'
    print '#include "svga_shader_dump.h"'
    print '#include "svga3d_reg.h"'
    print
    print '#include "util/u_debug.h"'
    print '#include "svga_dump.h"'
    print
    # Parse the SVGA headers with GCC-XML via pygccxml.
    config = parser.config_t(
        include_paths = ['../../../include', '../include'],
        compiler = 'gcc',
    )
    headers = [
        'svga_types.h',
        'svga3d_reg.h',
    ]
    decls = parser.parse(headers, config, parser.COMPILATION_MODE.ALL_AT_ONCE)
    global_ns = declarations.get_global_namespace(decls)
    # Collect every struct name referenced by the command table, emit a
    # dumper for each, then emit the top-level dispatch functions.
    names = set()
    for id, header, body, footer in cmds:
        names.add(header)
        for struct, count in body:
            names.add(struct)
        if footer is not None:
            names.add(footer)
    for class_ in global_ns.classes(lambda decl: decl.name in names):
        dump_struct(decls, class_)
    dump_cmds()
if __name__ == '__main__':
    main()
| gpl-3.0 |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/python/Lib/curses/ascii.py | 396 | 2607 | """Constants and membership tests for ASCII characters"""
# Control-character code points, named by their ASCII mnemonics.  The
# trailing comment on each line gives the equivalent caret notation.
NUL = 0x00 # ^@
SOH = 0x01 # ^A
STX = 0x02 # ^B
ETX = 0x03 # ^C
EOT = 0x04 # ^D
ENQ = 0x05 # ^E
ACK = 0x06 # ^F
BEL = 0x07 # ^G
BS = 0x08 # ^H
TAB = 0x09 # ^I
HT = 0x09 # ^I
LF = 0x0a # ^J
NL = 0x0a # ^J
VT = 0x0b # ^K
FF = 0x0c # ^L
CR = 0x0d # ^M
SO = 0x0e # ^N
SI = 0x0f # ^O
DLE = 0x10 # ^P
DC1 = 0x11 # ^Q
DC2 = 0x12 # ^R
DC3 = 0x13 # ^S
DC4 = 0x14 # ^T
NAK = 0x15 # ^U
SYN = 0x16 # ^V
ETB = 0x17 # ^W
CAN = 0x18 # ^X
EM = 0x19 # ^Y
SUB = 0x1a # ^Z
ESC = 0x1b # ^[
FS = 0x1c # ^\
GS = 0x1d # ^]
RS = 0x1e # ^^
US = 0x1f # ^_
SP = 0x20 # space
DEL = 0x7f # delete
# Mnemonic names for code points 0x00-0x20, indexed by code point.
controlnames = [
"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
"SP"
]
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
def isascii(c): return _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (8,32)
def iscntrl(c): return _ctoi(c) <= 31
def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
def isxdigit(c): return isdigit(c) or \
(_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
def isctrl(c): return _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x7f)
else:
return _ctoi(c) & 0x7f
def ctrl(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x1f)
else:
return _ctoi(c) & 0x1f
def alt(c):
if type(c) == type(""):
return chr(_ctoi(c) | 0x80)
else:
return _ctoi(c) | 0x80
def unctrl(c):
bits = _ctoi(c)
if bits == 0x7f:
rep = "^?"
elif isprint(bits & 0x7f):
rep = chr(bits & 0x7f)
else:
rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
if bits & 0x80:
return "!" + rep
return rep
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.